docker-api-2.4.0/0000755000004100000410000000000014742317714013575 5ustar www-datawww-datadocker-api-2.4.0/lib/0000755000004100000410000000000014742317714014343 5ustar www-datawww-datadocker-api-2.4.0/lib/docker.rb0000644000004100000410000000702014742317714016136 0ustar www-datawww-data# frozen_string_literal: true require 'cgi' require 'multi_json' require 'excon' require 'tempfile' require 'base64' require 'find' require 'rubygems/package' require 'uri' require 'open-uri' # Add the Hijack middleware at the top of the middleware stack so it can # potentially hijack HTTP sockets (when attaching to stdin) before other # middlewares try and parse the response. require 'excon/middlewares/hijack' Excon.defaults[:middlewares].unshift Excon::Middleware::Hijack Excon.defaults[:middlewares] << Excon::Middleware::RedirectFollower # The top-level module for this gem. Its purpose is to hold global # configuration variables that are used as defaults in other classes. module Docker attr_accessor :creds, :logger require 'docker/error' require 'docker/connection' require 'docker/base' require 'docker/container' require 'docker/network' require 'docker/event' require 'docker/exec' require 'docker/image' require 'docker/messages_stack' require 'docker/messages' require 'docker/util' require 'docker/version' require 'docker/volume' require 'docker/rake_task' if defined?(Rake::Task) def default_socket_url 'unix:///var/run/docker.sock' end def env_url ENV['DOCKER_URL'] || ENV['DOCKER_HOST'] end def env_options if cert_path = ENV['DOCKER_CERT_PATH'] { client_cert: File.join(cert_path, 'cert.pem'), client_key: File.join(cert_path, 'key.pem'), ssl_ca_file: File.join(cert_path, 'ca.pem'), scheme: 'https' }.merge(ssl_options) else {} end end def ssl_options if ENV['DOCKER_SSL_VERIFY'] == 'false' { ssl_verify_peer: false } else {} end end def url @url ||= env_url || default_socket_url # docker uses a default notation tcp:// which means tcp://localhost:2375 if @url == 'tcp://' @url = 'tcp://localhost:2375' end @url end def options @options ||= env_options end def url=(new_url) @url = new_url reset_connection! end def options=(new_options) @options = env_options.merge(new_options || {}) reset_connection! end def connection @connection ||= Connection.new(url, options) end def reset! @url = nil @options = nil reset_connection! end def reset_connection! @connection = nil end # Get the version of Go, Docker, and optionally the Git commit. def version(connection = self.connection) connection.version end # Get more information about the Docker server. def info(connection = self.connection) connection.info end # Ping the Docker server. def ping(connection = self.connection) connection.ping end # Determine if the server is podman or docker. def podman?(connection = self.connection) connection.podman? end # Determine if the session is rootless. def rootless?(connection = self.connection) connection.rootless? end # Login to the Docker registry. 
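# A minimal usage sketch; the credentials and registry address below are
# illustrative only:
#
#   Docker.authenticate!('username' => 'user',
#                        'password' => 'secret',
#                        'serveraddress' => 'https://index.docker.io/v1/')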
def authenticate!(options = {}, connection = self.connection) creds = MultiJson.dump(options) connection.post('/auth', {}, body: creds) @creds = creds true rescue Docker::Error::ServerError, Docker::Error::UnauthorizedError raise Docker::Error::AuthenticationError end module_function :default_socket_url, :env_url, :url, :url=, :env_options, :options, :options=, :creds, :creds=, :logger, :logger=, :connection, :reset!, :reset_connection!, :version, :info, :ping, :podman?, :rootless?, :authenticate!, :ssl_options end docker-api-2.4.0/lib/docker/0000755000004100000410000000000014742317714015612 5ustar www-datawww-datadocker-api-2.4.0/lib/docker/container.rb0000644000004100000410000002476614742317714020140 0ustar www-datawww-data# frozen_string_literal: true # This class represents a Docker Container. It's important to note that nothing # is cached so that the information is always up to date. class Docker::Container include Docker::Base # Update the @info hash, which is the only mutable state in this object. # e.g. if you would like a live status from the #info hash, call #refresh! first. def refresh! other = Docker::Container.all({all: true}, connection).find { |c| c.id.start_with?(self.id) || self.id.start_with?(c.id) } info.merge!(self.json) other && info.merge!(other.info) { |key, info_value, other_value| info_value } self end # Return a List of Hashes that represents the top running processes. def top(opts = {}) format = opts.delete(:format) { :array } resp = Docker::Util.parse_json(connection.get(path_for(:top), opts)) if resp['Processes'].nil? format == :array ? [] : {} else format == :array ? resp['Processes'].map { |ary| Hash[resp['Titles'].zip(ary)] } : resp end end # Wait for the current command to finish executing. Default wait time is # `Excon.options[:read_timeout]`. def wait(time = nil) excon_params = { :read_timeout => time } resp = connection.post(path_for(:wait), nil, excon_params) Docker::Util.parse_json(resp) end # Given a command and an optional number of seconds to wait for the currently # executing command, creates a new Container to run the specified command. If # the command that is currently executing does not return a 0 status code, an # UnexpectedResponseError is raised. def run(cmd, time = 1000) if (code = tap(&:start).wait(time)['StatusCode']).zero? commit.run(cmd) else raise UnexpectedResponseError, "Command returned status code #{code}." end end # Create an Exec instance inside the container # # @param command [String, Array] The command to run inside the Exec instance # @param options [Hash] The options to pass to Docker::Exec # # @return [Docker::Exec] The Exec instance def exec(command, options = {}, &block) # Establish values tty = options.delete(:tty) || false detach = options.delete(:detach) || false user = options.delete(:user) stdin = options.delete(:stdin) stdout = options.delete(:stdout) || !detach stderr = options.delete(:stderr) || !detach wait = options.delete(:wait) opts = { 'Container' => self.id, 'User' => user, 'AttachStdin' => !!stdin, 'AttachStdout' => stdout, 'AttachStderr' => stderr, 'Tty' => tty, 'Cmd' => command }.merge(options) # Create Exec Instance instance = Docker::Exec.create( opts, self.connection ) start_opts = { :tty => tty, :stdin => stdin, :detach => detach, :wait => wait } if detach instance.start!(start_opts) return instance else instance.start!(start_opts, &block) end end # Export the Container as a tar. 
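# Usage sketch; the filename below is illustrative:
#
#   File.open('container_export.tar', 'wb') do |file|
#     container.export { |chunk| file.write(chunk) }
#   end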
def export(&block) connection.get(path_for(:export), {}, :response_block => block) self end # Attach to a container's standard streams / logs. def attach(options = {}, excon_params = {}, &block) stdin = options.delete(:stdin) tty = options.delete(:tty) opts = { :stream => true, :stdout => true, :stderr => true }.merge(options) # Creates list to store stdout and stderr messages msgs = Docker::Messages.new if stdin # If attaching to stdin, we must hijack the underlying TCP connection # so we can stream stdin to the remote Docker process opts[:stdin] = true excon_params[:hijack_block] = Docker::Util.hijack_for(stdin, block, msgs, tty) else excon_params[:response_block] = Docker::Util.attach_for(block, msgs, tty) end connection.post( path_for(:attach), opts, excon_params ) [msgs.stdout_messages, msgs.stderr_messages] end # Create an Image from a Container's change.s def commit(options = {}) options.merge!('container' => self.id[0..7]) # [code](https://github.com/dotcloud/docker/blob/v0.6.3/commands.go#L1115) # Based on the link, the config passed as run, needs to be passed as the # body of the post so capture it, remove from the options, and pass it via # the post body config = MultiJson.dump(options.delete('run')) hash = Docker::Util.parse_json( connection.post('/commit', options, body: config) ) Docker::Image.send(:new, self.connection, hash) end # Return a String representation of the Container. def to_s "Docker::Container { :id => #{self.id}, :connection => #{self.connection} }" end # #json returns information about the Container, #changes returns a list of # the changes the Container has made to the filesystem. [:json, :changes].each do |method| define_method(method) do |opts = {}| Docker::Util.parse_json(connection.get(path_for(method), opts)) end end def logs(opts = {}) connection.get(path_for(:logs), opts) end def stats(options = {}) if block_given? options[:read_timeout] ||= 10 options[:idempotent] ||= false parser = lambda do |chunk, remaining_bytes, total_bytes| yield Docker::Util.parse_json(chunk) end begin connection.get(path_for(:stats), nil, {response_block: parser}.merge(options)) rescue Docker::Error::TimeoutError # If the container stops, the docker daemon will hold the connection # open forever, but stop sending events. # So this Timeout indicates the stream is over. end else Docker::Util.parse_json(connection.get(path_for(:stats), {stream: 0}.merge(options))) end end def rename(new_name) query = {} query['name'] = new_name connection.post(path_for(:rename), query) end def update(opts) connection.post(path_for(:update), {}, body: MultiJson.dump(opts)) end def streaming_logs(opts = {}, &block) stack_size = opts.delete('stack_size') || opts.delete(:stack_size) || -1 tty = opts.delete('tty') || opts.delete(:tty) || false msgs = Docker::MessagesStack.new(stack_size) excon_params = {response_block: Docker::Util.attach_for(block, msgs, tty), idempotent: false} connection.get(path_for(:logs), opts, excon_params) msgs.messages.join end def start!(opts = {}) connection.post(path_for(:start), {}, body: MultiJson.dump(opts)) self end def kill!(opts = {}) connection.post(path_for(:kill), opts) self end # #start! and #kill! both perform the associated action and # return the Container. #start and #kill do the same, # but rescue from ServerErrors. [:start, :kill].each do |method| define_method(method) do |*args| begin; public_send(:"#{method}!", *args); rescue ServerError; self end end end # #stop! and #restart! both perform the associated action and # return the Container. 
#stop and #restart do the same, # but rescue from ServerErrors. [:stop, :restart].each do |method| define_method(:"#{method}!") do |opts = {}| timeout = opts.delete('timeout') query = {} request_options = { :body => MultiJson.dump(opts) } if timeout query['t'] = timeout # Ensure request does not timeout before Docker timeout request_options.merge!( read_timeout: timeout.to_i + 5, write_timeout: timeout.to_i + 5 ) end connection.post(path_for(method), query, request_options) self end define_method(method) do |*args| begin; public_send(:"#{method}!", *args); rescue ServerError; self end end end # remove container def remove(options = {}) connection.delete("/containers/#{self.id}", options) nil end alias_method :delete, :remove # pause and unpause containers # #pause! and #unpause! both perform the associated action and # return the Container. #pause and #unpause do the same, # but rescue from ServerErrors. [:pause, :unpause].each do |method| define_method(:"#{method}!") do connection.post path_for(method) self end define_method(method) do begin; public_send(:"#{method}!"); rescue ServerError; self; end end end def archive_out(path, &block) connection.get( path_for(:archive), { 'path' => path }, :response_block => block ) self end def archive_in(inputs, output_path, opts = {}) file_hash = Docker::Util.file_hash_from_paths([*inputs]) tar = StringIO.new(Docker::Util.create_tar(file_hash)) archive_in_stream(output_path, opts) do tar.read(Excon.defaults[:chunk_size]).to_s end end def archive_in_stream(output_path, opts = {}, &block) overwrite = opts[:overwrite] || opts['overwrite'] || false connection.put( path_for(:archive), { 'path' => output_path, 'noOverwriteDirNonDir' => !overwrite }, :headers => { 'Content-Type' => 'application/x-tar' }, &block ) self end def read_file(path) content = StringIO.new archive_out(path) do |chunk| content.write chunk end content.rewind Gem::Package::TarReader.new(content) do |tar| tar.each do |tarfile| return tarfile.read end end end def store_file(path, file_content) output_io = StringIO.new( Docker::Util.create_tar( path => file_content ) ) archive_in_stream("/", overwrite: true) { output_io.read } end # Create a new Container. def self.create(opts = {}, conn = Docker.connection) query = opts.select {|key| ['name', :name].include?(key) } clean_opts = opts.reject {|key| ['name', :name].include?(key) } resp = conn.post('/containers/create', query, :body => MultiJson.dump(clean_opts)) hash = Docker::Util.parse_json(resp) || {} new(conn, hash) end # Return the container with specified ID def self.get(id, opts = {}, conn = Docker.connection) container_json = conn.get("/containers/#{id}/json", opts) hash = Docker::Util.parse_json(container_json) || {} new(conn, hash) end # Return all of the Containers. def self.all(opts = {}, conn = Docker.connection) hashes = Docker::Util.parse_json(conn.get('/containers/json', opts)) || [] hashes.map { |hash| new(conn, hash) } end # Prune images def self.prune(conn = Docker.connection) conn.post("/containers/prune", {}) nil end # Convenience method to return the path for a particular resource. def path_for(resource) "/containers/#{self.id}/#{resource}" end private :path_for private_class_method :new end docker-api-2.4.0/lib/docker/rake_task.rb0000644000004100000410000000125614742317714020107 0ustar www-datawww-data# frozen_string_literal: true # This class allows image-based tasks to be created. class Docker::ImageTask < Rake::Task def self.scope_name(_scope, task_name) task_name end def needed? !has_repo_tag? 
end private def has_repo_tag? images.any? { |image| image.info['RepoTags'].include?(repo_tag) } end def images @images ||= Docker::Image.all(:all => true) end def repo name.split(':')[0] end def tag name.split(':')[1] || 'latest' end def repo_tag "#{repo}:#{tag}" end end # Monkeypatch Rake to add the `image` task. module Rake::DSL def image(*args, &block) Docker::ImageTask.define_task(*args, &block) end end docker-api-2.4.0/lib/docker/messages.rb0000644000004100000410000000304014742317714017743 0ustar www-datawww-data# frozen_string_literal: true # This class represents all the messages either received by chunks from attach class Docker::Messages attr_accessor :buffer, :stdout_messages, :stderr_messages, :all_messages def initialize(stdout_messages=[], stderr_messages=[], all_messages=[], buffer="") @stdout_messages = stdout_messages @stderr_messages = stderr_messages @all_messages = all_messages @buffer = buffer end def add_message(source, message) case source when 1 stdout_messages << message when 2 stderr_messages << message end all_messages << message end def get_message(raw_text) header = raw_text.slice!(0,8) if header.length < 8 @buffer = header return end type, length = header.unpack("CxxxN") message = raw_text.slice!(0,length) if message.length < length @buffer = header + message else add_message(type, message) end end def append(messages) @stdout_messages += messages.stdout_messages @stderr_messages += messages.stderr_messages @all_messages += messages.all_messages messages.clear @all_messages end def clear stdout_messages.clear stderr_messages.clear all_messages.clear end # Method to break apart application/vnd.docker.raw-stream headers def decipher_messages(body) raw_text = buffer + body.dup messages = Docker::Messages.new while !raw_text.empty? messages.get_message(raw_text) end messages end end docker-api-2.4.0/lib/docker/error.rb0000644000004100000410000000234214742317714017271 0ustar www-datawww-data# frozen_string_literal: true # This module holds the Errors for the gem. module Docker::Error # The default error. It's never actually raised, but can be used to catch all # gem-specific errors that are thrown as they all subclass from this. class DockerError < StandardError; end # Raised when invalid arguments are passed to a method. class ArgumentError < DockerError; end # Raised when a request returns a 400. class ClientError < DockerError; end # Raised when a request returns a 401. class UnauthorizedError < DockerError; end # Raised when a request returns a 404. class NotFoundError < DockerError; end # Raised when a request returns a 409. class ConflictError < DockerError; end # Raised when a request returns a 500. class ServerError < DockerError; end # Raised when there is an unexpected response code / body. class UnexpectedResponseError < DockerError; end # Raised when there is an incompatible version of Docker. class VersionError < DockerError; end # Raised when a request times out. class TimeoutError < DockerError; end # Raised when login fails. class AuthenticationError < DockerError; end # Raised when an IO action fails. class IOError < DockerError; end end docker-api-2.4.0/lib/docker/image.rb0000644000004100000410000002746114742317714017233 0ustar www-datawww-data# frozen_string_literal: true # This class represents a Docker Image. class Docker::Image include Docker::Base # Given a command and optional list of streams to attach to, run a command on # an Image. This will not modify the Image, but rather create a new Container # to run the Image. 
If the image has an embedded config, no command is # necessary, but it will fail with 500 if no config is saved with the image def run(cmd = nil, options = {}) opts = {'Image' => self.id}.merge(options) opts["Cmd"] = cmd.is_a?(String) ? cmd.split(/\s+/) : cmd begin Docker::Container.create(opts, connection) .tap(&:start!) rescue ServerError, ClientError => ex if cmd raise ex else raise ex, "No command specified." end end end # Push the Image to the Docker registry. def push(creds = nil, options = {}, &block) repo_tag = options.delete(:repo_tag) || ensure_repo_tags.first raise ArgumentError, "Image is untagged" if repo_tag.nil? repo, tag = Docker::Util.parse_repo_tag(repo_tag) raise ArgumentError, "Image does not have a name to push." if repo.nil? body = +"" credentials = creds || Docker.creds || {} headers = Docker::Util.build_auth_header(credentials) opts = {:tag => tag}.merge(options) connection.post("/images/#{repo}/push", opts, :headers => headers, :response_block => self.class.response_block(body, &block)) self end # Tag the Image. def tag(opts = {}) self.info['RepoTags'] ||= [] connection.post(path_for(:tag), opts) repo = opts['repo'] || opts[:repo] tag = opts['tag'] || opts[:tag] || 'latest' self.info['RepoTags'] << "#{repo}:#{tag}" end # Given a path of a local file and the path it should be inserted, creates # a new Image that has that file. def insert_local(opts = {}) local_paths = opts.delete('localPath') output_path = opts.delete('outputPath') local_paths = [ local_paths ] unless local_paths.is_a?(Array) file_hash = Docker::Util.file_hash_from_paths(local_paths) file_hash['Dockerfile'] = dockerfile_for(file_hash, output_path) tar = Docker::Util.create_tar(file_hash) body = connection.post('/build', opts, :body => tar) self.class.send(:new, connection, 'id' => Docker::Util.extract_id(body)) end # Remove the Image from the server. def remove(opts = {}) name = opts.delete(:name) unless name if ::Docker.podman?(connection) name = self.id.split(':').last else name = self.id end end connection.delete("/images/#{name}", opts) end alias_method :delete, :remove # Return a String representation of the Image. def to_s "Docker::Image { :id => #{self.id}, :info => #{self.info.inspect}, "\ ":connection => #{self.connection} }" end # #json returns extra information about an Image, #history returns its # history. [:json, :history].each do |method| define_method(method) do |opts = {}| Docker::Util.parse_json(connection.get(path_for(method), opts)) end end # Save the image as a tarball def save(filename = nil) self.class.save(self.id, filename, connection) end # Save the image as a tarball to an IO object. def save_stream(opts = {}, &block) self.class.save_stream(self.id, opts, connection, &block) end # Update the @info hash, which is the only mutable state in this object. def refresh! img = Docker::Image.all({:all => true}, connection).find { |image| image.id.start_with?(self.id) || self.id.start_with?(image.id) } info.merge!(self.json) img && info.merge!(img.info) self end class << self # Create a new Image. def create(opts = {}, creds = nil, conn = Docker.connection, &block) credentials = creds.nil? ? Docker.creds : MultiJson.dump(creds) headers = credentials && Docker::Util.build_auth_header(credentials) || {} body = +'' conn.post( '/images/create', opts, :headers => headers, :response_block => response_block(body, &block) ) # NOTE: see associated tests for why we're looking at image#end_with? 
image = opts['fromImage'] || opts[:fromImage] tag = opts['tag'] || opts[:tag] image = "#{image}:#{tag}" if tag && !image.end_with?(":#{tag}") get(image, {}, conn) end # Return a specific image. def get(id, opts = {}, conn = Docker.connection) image_json = conn.get("/images/#{id}/json", opts) hash = Docker::Util.parse_json(image_json) || {} new(conn, hash) end # Delete a specific image def remove(id, opts = {}, conn = Docker.connection) conn.delete("/images/#{id}", opts) end alias_method :delete, :remove # Prune images def prune(conn = Docker.connection) conn.post("/images/prune", {}) end # Save the raw binary representation or one or more Docker images # # @param names [String, Array#String] The image(s) you wish to save # @param filename [String] The file to export the data to. # @param conn [Docker::Connection] The Docker connection to use # # @return [NilClass, String] If filename is nil, return the string # representation of the binary data. If the filename is not nil, then # return nil. def save(names, filename = nil, conn = Docker.connection) if filename File.open(filename, 'wb') do |file| save_stream(names, {}, conn, &response_block_for_save(file)) end nil else string = +'' save_stream(names, {}, conn, &response_block_for_save(string)) string end end # Stream the contents of Docker image(s) to a block. # # @param names [String, Array#String] The image(s) you wish to save # @param conn [Docker::Connection] The Docker connection to use # @yield chunk [String] a chunk of the Docker image(s). def save_stream(names, opts = {}, conn = Docker.connection, &block) # By using compare_by_identity we can create a Hash that has # the same key multiple times. query = {}.tap(&:compare_by_identity) Array(names).each { |name| query['names'.dup] = name } conn.get( '/images/get', query, opts.merge(:response_block => block) ) nil end # Load a tar Image def load(tar, opts = {}, conn = Docker.connection, creds = nil, &block) headers = build_headers(creds) io = tar.is_a?(String) ? File.open(tar, 'rb') : tar body = +"" conn.post( '/images/load', opts, :headers => headers, :response_block => response_block(body, &block) ) { io.read(Excon.defaults[:chunk_size]).to_s } end # Check if an image exists. def exist?(id, opts = {}, conn = Docker.connection) get(id, opts, conn) true rescue Docker::Error::NotFoundError false end # Return every Image. def all(opts = {}, conn = Docker.connection) hashes = Docker::Util.parse_json(conn.get('/images/json', opts)) || [] hashes.map { |hash| new(conn, hash) } end # Given a query like `{ :term => 'sshd' }`, queries the Docker Registry for # a corresponding Image. def search(query = {}, connection = Docker.connection, creds = nil) credentials = creds.nil? ? Docker.creds : creds.to_json headers = credentials && Docker::Util.build_auth_header(credentials) || {} body = connection.get( '/images/search', query, :headers => headers, ) hashes = Docker::Util.parse_json(body) || [] hashes.map { |hash| new(connection, 'id' => hash['name']) } end # Import an Image from the output of Docker::Container#export. The first # argument may either be a File or URI. 
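# Usage sketch; the local path and URL below are illustrative:
#
#   Docker::Image.import('some-export.tar')
#   Docker::Image.import('http://some-site.net/my-image.tar')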
def import(imp, opts = {}, conn = Docker.connection) require 'open-uri' # This differs after Ruby 2.4 if URI.public_methods.include?(:open) munged_open = URI.method(:open) else munged_open = self.method(:open) end munged_open.call(imp) do |io| import_stream(opts, conn) do io.read(Excon.defaults[:chunk_size]).to_s end end rescue StandardError raise Docker::Error::IOError, "Could not import '#{imp}'" end def import_stream(options = {}, connection = Docker.connection, &block) body = connection.post( '/images/create', options.merge('fromSrc' => '-'), :headers => { 'Content-Type' => 'application/tar', 'Transfer-Encoding' => 'chunked' }, &block ) new(connection, 'id'=> Docker::Util.parse_json(body)['status']) end # Given a Dockerfile as a string, builds an Image. def build(commands, opts = {}, connection = Docker.connection, &block) body = +"" connection.post( '/build', opts, :body => Docker::Util.create_tar('Dockerfile' => commands), :response_block => response_block(body, &block) ) new(connection, 'id' => Docker::Util.extract_id(body)) rescue Docker::Error::ServerError raise Docker::Error::UnexpectedResponseError end # Given File like object containing a tar file, builds an Image. # # If a block is passed, chunks of output produced by Docker will be passed # to that block. def build_from_tar(tar, opts = {}, connection = Docker.connection, creds = nil, &block) headers = build_headers(creds) # The response_block passed to Excon will build up this body variable. body = +"" connection.post( '/build', opts, :headers => headers, :response_block => response_block(body, &block) ) { tar.read(Excon.defaults[:chunk_size]).to_s } new(connection, 'id' => Docker::Util.extract_id(body), :headers => headers) end # Given a directory that contains a Dockerfile, builds an Image. # # If a block is passed, chunks of output produced by Docker will be passed # to that block. def build_from_dir(dir, opts = {}, connection = Docker.connection, creds = nil, &block) tar = Docker::Util.create_dir_tar(dir) build_from_tar tar, opts, connection, creds, &block ensure unless tar.nil? tar.close FileUtils.rm(tar.path, force: true) end end end private # A method to build the config header and merge it into the # headers sent by build_from_dir. def self.build_headers(creds=nil) credentials = creds || Docker.creds || {} config_header = Docker::Util.build_config_header(credentials) headers = { 'Content-Type' => 'application/tar', 'Transfer-Encoding' => 'chunked' } headers = headers.merge(config_header) if config_header headers end # Convenience method to return the path for a particular resource. def path_for(resource) "/images/#{self.id}/#{resource}" end # Convience method to get the Dockerfile for a file hash and a path to # output to. def dockerfile_for(file_hash, output_path) dockerfile = +"from #{self.id}\n" file_hash.keys.each do |basename| dockerfile << "add #{basename} #{output_path}\n" end dockerfile end def ensure_repo_tags refresh! unless info.has_key?('RepoTags') info['RepoTags'] end # Generates the block to be passed as a reponse block to Excon. The returned # lambda will append Docker output to the first argument, and yield output to # the passed block, if a block is given. def self.response_block(body) lambda do |chunk, remaining, total| body << chunk yield chunk if block_given? end end # Generates the block to be passed in to the save request. This lambda will # append the streaming data to the file provided. 
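# Used internally, e.g. by ::save above:
#
#   save_stream(names, {}, conn, &response_block_for_save(file))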
def self.response_block_for_save(file) lambda do |chunk, remianing, total| file << chunk end end end docker-api-2.4.0/lib/docker/util.rb0000644000004100000410000002223614742317714017121 0ustar www-datawww-data# frozen_string_literal: true require 'set' # This module holds shared logic that doesn't really belong anywhere else in the # gem. module Docker::Util # http://www.tldp.org/LDP/GNU-Linux-Tools-Summary/html/x11655.htm#STANDARD-WILDCARDS GLOB_WILDCARDS = /[\?\*\[\{\]\}]/ include Docker::Error module_function # Attaches to a HTTP stream # # @param block # @param msg_stack [Docker::Messages] # @param tty [boolean] def attach_for(block, msg_stack, tty = false) # If TTY is enabled expect raw data and append to stdout if tty attach_for_tty(block, msg_stack) else attach_for_multiplex(block, msg_stack) end end def attach_for_tty(block, msg_stack) messages = Docker::Messages.new lambda do |c,r,t| messages.stdout_messages << c messages.all_messages << c msg_stack.append(messages) block.call c if block end end def attach_for_multiplex(block, msg_stack) messages = Docker::Messages.new lambda do |c,r,t| messages = messages.decipher_messages(c) unless block.nil? messages.stdout_messages.each do |msg| block.call(:stdout, msg) end messages.stderr_messages.each do |msg| block.call(:stderr, msg) end end msg_stack.append(messages) end end def debug(msg) Docker.logger.debug(msg) if Docker.logger end def hijack_for(stdin, block, msg_stack, tty) attach_block = attach_for(block, msg_stack, tty) lambda do |socket| debug "hijack: hijacking the HTTP socket" threads = [] debug "hijack: starting stdin copy thread" threads << Thread.start do debug "hijack: copying stdin => socket" IO.copy_stream stdin, socket debug "hijack: closing write end of hijacked socket" close_write(socket) end debug "hijack: starting hijacked socket read thread" threads << Thread.start do debug "hijack: reading from hijacked socket" begin while chunk = socket.readpartial(512) debug "hijack: got #{chunk.bytesize} bytes from hijacked socket" attach_block.call chunk, nil, nil end rescue EOFError end debug "hijack: killing stdin copy thread" threads.first.kill end threads.each(&:join) end end def close_write(socket) if socket.respond_to?(:close_write) socket.close_write elsif socket.respond_to?(:io) socket.io.close_write else raise IOError, 'Cannot close socket' end end def parse_json(body) MultiJson.load(body) unless body.nil? || body.empty? || (body == 'null') rescue MultiJson::ParseError => ex raise UnexpectedResponseError, ex.message end def parse_repo_tag(str) if match = str.match(/\A(.*):([^:]*)\z/) match.captures else [str, ''] end end def fix_json(body) parse_json("[#{body.gsub(/}\s*{/, '},{')}]") end def create_tar(hash = {}) output = StringIO.new Gem::Package::TarWriter.new(output) do |tar| hash.each do |file_name, file_details| permissions = file_details.is_a?(Hash) ? file_details[:permissions] : 0640 tar.add_file(file_name, permissions) do |tar_file| content = file_details.is_a?(Hash) ? 
file_details[:content] : file_details tar_file.write(content) end end end output.tap(&:rewind).string end def create_dir_tar(directory) tempfile = create_temp_file directory += '/' unless directory.end_with?('/') create_relative_dir_tar(directory, tempfile) File.new(tempfile.path, 'r') end # return the set of files that form the docker context # implement this logic https://docs.docker.com/engine/reference/builder/#dockerignore-file def docker_context(directory) all_files = glob_all_files(File.join(directory, "**/*")) dockerignore = File.join(directory, '.dockerignore') return all_files unless all_files.include?(dockerignore) # Iterate over valid lines, starting with the initial glob as working set File .read(dockerignore) # https://docs.docker.com/engine/reference/builder/#dockerignore-file .each_line # "a newline-separated list of patterns" .map(&:strip) # "A preprocessing step removes leading and trailing whitespace" .reject(&:empty?) # "Lines that are blank after preprocessing are ignored" .reject { |p| p.start_with?('#') } # "if [a line starts with `#`], then this line is considered as a comment" .each_with_object(Set.new(all_files)) do |p, working_set| # determine the pattern (p) and whether it is to be added or removed from context add = p.start_with?("!") # strip leading "!" from pattern p, then prepend the base directory matches = dockerignore_compatible_glob(File.join(directory, add ? p[1..-1] : p)) # add or remove the matched items as indicated in the ignore file add ? working_set.merge(matches) : working_set.replace(working_set.difference(matches)) end .to_a end def create_relative_dir_tar(directory, output) Gem::Package::TarWriter.new(output) do |tar| files = docker_context(directory) files.each do |prefixed_file_name| stat = File.stat(prefixed_file_name) next unless stat.file? unprefixed_file_name = prefixed_file_name[directory.length..-1] add_file_to_tar( tar, unprefixed_file_name, stat.mode, stat.size, stat.mtime ) do |tar_file| IO.copy_stream(File.open(prefixed_file_name, 'rb'), tar_file) end end end end def add_file_to_tar(tar, name, mode, size, mtime) tar.check_closed io = tar.instance_variable_get(:@io) name, prefix = tar.split_name(name) header = Gem::Package::TarHeader.new(:name => name, :mode => mode, :size => size, :prefix => prefix, :mtime => mtime).to_s io.write header os = Gem::Package::TarWriter::BoundedStream.new io, size yield os if block_given? min_padding = size - os.written io.write("\0" * min_padding) remainder = (512 - (size % 512)) % 512 io.write("\0" * remainder) tar end def create_temp_file tempfile_name = Dir::Tmpname.create('out') {} File.open(tempfile_name, 'wb+') end def extract_id(body) body.lines.reverse_each do |line| if (id = line.match(/Successfully built ([a-f0-9]+)/)) && !id[1].empty? return id[1] end end raise UnexpectedResponseError, "Couldn't find id: #{body}" end # Convenience method to get the file hash corresponding to an array of # local paths. def file_hash_from_paths(local_paths) local_paths.each_with_object({}) do |local_path, file_hash| unless File.exist?(local_path) raise ArgumentError, "#{local_path} does not exist." 
end basename = File.basename(local_path) if File.directory?(local_path) tar = create_dir_tar(local_path) file_hash[basename] = { content: tar.read, permissions: filesystem_permissions(local_path) } tar.close FileUtils.rm(tar.path) else file_hash[basename] = { content: File.read(local_path, mode: 'rb'), permissions: filesystem_permissions(local_path) } end end end def filesystem_permissions(path) mode = sprintf("%o", File.stat(path).mode) mode[(mode.length - 3)...mode.length].to_i(8) end def build_auth_header(credentials) credentials = MultiJson.dump(credentials) if credentials.is_a?(Hash) encoded_creds = Base64.urlsafe_encode64(credentials) { 'X-Registry-Auth' => encoded_creds } end def build_config_header(credentials) if credentials.is_a?(String) credentials = MultiJson.load(credentials, symbolize_keys: true) end header = MultiJson.dump( credentials[:serveraddress].to_s => { 'username' => credentials[:username].to_s, 'password' => credentials[:password].to_s, 'email' => credentials[:email].to_s } ) encoded_header = Base64.urlsafe_encode64(header) { 'X-Registry-Config' => encoded_header } end # do a directory glob that matches .dockerignore behavior # specifically: matched directories are considered a recursive match def dockerignore_compatible_glob(pattern) begin some_dirs, some_files = glob_all_files(pattern).partition { |f| File.directory?(f) } # since all directories will be re-processed with a /**/* glob, we can preemptively # eliminate any whose parent directory is already in this set. This saves significant time. some_files + some_dirs.reject { |d| some_dirs.any? { |pd| d.start_with?(pd) && d != pd } } end.each_with_object(Set.new) do |f, acc| # expand any directories by globbing; flatten results acc.merge(File.directory?(f) ? glob_all_files("#{f}/**/*") : [f]) end end def glob_all_files(pattern) # globs of "a_dir/**/*" can return "a_dir/.", so explicitly reject those (Dir.glob(pattern, File::FNM_DOTMATCH) - ['..', '.']).reject { |p| p.end_with?("/.") } end end docker-api-2.4.0/lib/docker/messages_stack.rb0000644000004100000410000000102514742317714021131 0ustar www-datawww-data# frozen_string_literal: true # This class represents a messages stack class Docker::MessagesStack attr_accessor :messages # Initialize stack with optional size # # @param size [Integer] def initialize(size = -1) @messages = [] @size = size end # Append messages to stack # # @param messages [Docker::Messages] def append(messages) return if @size == 0 messages.all_messages.each do |msg| @messages << msg @messages.shift if @size > -1 && @messages.size > @size end end end docker-api-2.4.0/lib/docker/base.rb0000644000004100000410000000176514742317714017062 0ustar www-datawww-data# frozen_string_literal: true # This class is a base class for Docker Container and Image. # It is implementing accessor methods for the models attributes. module Docker::Base include Docker::Error attr_accessor :connection, :info attr_reader :id # The private new method accepts a connection and a hash of options that must include an id. def initialize(connection, hash={}) unless connection.is_a?(Docker::Connection) raise ArgumentError, "Expected a Docker::Connection, got: #{connection}." end normalize_hash(hash) @connection, @info, @id = connection, hash, hash['id'] raise ArgumentError, "Must have id, got: #{hash}" unless @id end # The docker-api will some time return "ID" other times it will return "Id" # and other times it will return "id". 
This method normalize it to "id" # The volumes endpoint returns Name instead of ID, added in the normalize function def normalize_hash(hash) hash["id"] ||= hash.delete("ID") || hash.delete("Id") end end docker-api-2.4.0/lib/docker/exec.rb0000644000004100000410000000700514742317714017065 0ustar www-datawww-data# frozen_string_literal: true # This class represents a Docker Exec Instance. class Docker::Exec include Docker::Base # Convert details about the object into a string # # @return [String] String representation of the Exec instance object def to_s "Docker::Exec { :id => #{self.id}, :connection => #{self.connection} }" end # Create a new Exec instance in a running container. Please note, this does # NOT execute the instance - you must run #start. Also, each instance is # one-time use only. # # @param options [Hash] Parameters to pass in to the API. # @param conn [Docker::Connection] Connection to Docker Remote API # # @return [Docker::Exec] self def self.create(options = {}, conn = Docker.connection) container = options.delete('Container') # Podman does not attach these by default but does require them to be attached if ::Docker.podman?(conn) options['AttachStderr'] = true if options['AttachStderr'].nil? options['AttachStdout'] = true if options['AttachStdout'].nil? end resp = conn.post("/containers/#{container}/exec", {}, body: MultiJson.dump(options)) hash = Docker::Util.parse_json(resp) || {} new(conn, hash) end # Get info about the Exec instance # def json Docker::Util.parse_json(connection.get(path_for(:json), {})) end # Start the Exec instance. The Exec instance is deleted after this so this # command can only be run once. # # @param options [Hash] Options to dictate behavior of the instance # @option options [Object] :stdin (nil) The object to pass to STDIN. # @option options [TrueClass, FalseClass] :detach (false) Whether to attach # to STDOUT/STDERR. # @option options [TrueClass, FalseClass] :tty (false) Whether to attach using # a pseudo-TTY. # # @return [Array, Array, Int] The STDOUT, STDERR and exit code def start!(options = {}, &block) # Parse the Options tty = !!options.delete(:tty) detached = !!options.delete(:detach) stdin = options[:stdin] read_timeout = options[:wait] # Create API Request Body body = MultiJson.dump( 'Tty' => tty, 'Detach' => detached ) excon_params = { body: body } msgs = Docker::Messages.new unless detached if stdin excon_params[:hijack_block] = Docker::Util.hijack_for(stdin, block, msgs, tty) else excon_params[:response_block] = Docker::Util.attach_for(block, msgs, tty) end end excon_params[:read_timeout] = read_timeout unless read_timeout.nil? connection.post(path_for(:start), nil, excon_params) [msgs.stdout_messages, msgs.stderr_messages, self.json['ExitCode']] end # #start! performs the associated action and returns the output. # #start does the same, but rescues from ServerErrors. 
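# Usage sketch; the command and attach options below are illustrative:
#
#   exec_instance = Docker::Exec.create(
#     'Container' => container.id,
#     'AttachStdout' => true,
#     'Cmd' => ['date']
#   )
#   stdout, stderr, exit_code = exec_instance.start!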
[:start].each do |method| define_method(method) do |*args| begin; public_send(:"#{method}!", *args); rescue ServerError; self end end end # Resize the TTY associated with the Exec instance # # @param query [Hash] API query parameters # @option query [Fixnum] h Height of the TTY # @option query [Fixnum] w Width of the TTY # # @return [Docker::Exec] self def resize(query = {}) connection.post(path_for(:resize), query) self end # Get the request URI for the given endpoint # # @param endpoint [Symbol] The endpoint to grab # @return [String] The full Remote API endpoint with ID def path_for(endpoint) "/exec/#{self.id}/#{endpoint}" end private :path_for private_class_method :new end docker-api-2.4.0/lib/docker/network.rb0000644000004100000410000000424014742317714017630 0ustar www-datawww-data# frozen_string_literal: true # This class represents a Docker Network. class Docker::Network include Docker::Base def connect(container, opts = {}, body_opts = {}) body = MultiJson.dump({ container: container }.merge(body_opts)) Docker::Util.parse_json( connection.post(path_for('connect'), opts, body: body) ) reload end def disconnect(container, opts = {}) body = MultiJson.dump(container: container) Docker::Util.parse_json( connection.post(path_for('disconnect'), opts, body: body) ) reload end def remove(opts = {}) connection.delete(path_for, opts) nil end alias_method :delete, :remove def json(opts = {}) Docker::Util.parse_json(connection.get(path_for, opts)) end def to_s "Docker::Network { :id => #{id}, :info => #{info.inspect}, "\ ":connection => #{connection} }" end def reload network_json = @connection.get("/networks/#{@id}") hash = Docker::Util.parse_json(network_json) || {} @info = hash end class << self def create(name, opts = {}, conn = Docker.connection) default_opts = MultiJson.dump({ 'Name' => name, 'CheckDuplicate' => true }.merge(opts)) resp = conn.post('/networks/create', {}, body: default_opts) response_hash = Docker::Util.parse_json(resp) || {} get(response_hash['Id'], {}, conn) || {} end def get(id, opts = {}, conn = Docker.connection) network_json = conn.get("/networks/#{id}", opts) hash = Docker::Util.parse_json(network_json) || {} new(conn, hash) end def all(opts = {}, conn = Docker.connection) hashes = Docker::Util.parse_json(conn.get('/networks', opts)) || [] hashes.map { |hash| new(conn, hash) } end def remove(id, opts = {}, conn = Docker.connection) conn.delete("/networks/#{id}", opts) nil end alias_method :delete, :remove def prune(conn = Docker.connection) conn.post("/networks/prune", {}) nil end end # Convenience method to return the path for a particular resource. def path_for(resource = nil) ["/networks/#{id}", resource].compact.join('/') end private :path_for end docker-api-2.4.0/lib/docker/event.rb0000644000004100000410000000545114742317714017265 0ustar www-datawww-data# frozen_string_literal: true # This class represents a Docker Event. class Docker::Event include Docker::Error # Represents the actor object nested within an event class Actor attr_accessor :ID, :Attributes def initialize(actor_attributes = {}) [:ID, :Attributes].each do |sym| value = actor_attributes[sym] if value.nil? value = actor_attributes[sym.to_s] end send("#{sym}=", value) end if self.Attributes.nil? 
self.Attributes = {} end end alias_method :id, :ID alias_method :attributes, :Attributes end class << self include Docker::Error def stream(opts = {}, conn = Docker.connection, &block) conn.get('/events', opts, :response_block => lambda { |b, r, t| b.each_line do |line| block.call(new_event(line, r, t)) end }) end def since(since, opts = {}, conn = Docker.connection, &block) stream(opts.merge(:since => since), conn, &block) end def new_event(body, remaining, total) return if body.nil? || body.empty? json = Docker::Util.parse_json(body) Docker::Event.new(json) end end attr_accessor :Type, :Action, :time, :timeNano attr_reader :Actor # Deprecated interface attr_accessor :status, :from def initialize(event_attributes = {}) [:Type, :Action, :Actor, :time, :timeNano, :status, :from].each do |sym| value = event_attributes[sym] if value.nil? value = event_attributes[sym.to_s] end send("#{sym}=", value) end if @Actor.nil? value = event_attributes[:id] if value.nil? value = event_attributes['id'] end self.Actor = Actor.new(ID: value) end end def ID self.actor.ID end def Actor=(actor) return if actor.nil? if actor.is_a? Actor @Actor = actor else @Actor = Actor.new(actor) end end alias_method :type, :Type alias_method :action, :Action alias_method :actor, :Actor alias_method :time_nano, :timeNano alias_method :id, :ID def to_s if type.nil? && action.nil? to_s_legacy else to_s_actor_style end end private def to_s_legacy attributes = [] attributes << "from=#{from}" unless from.nil? unless attributes.empty? attribute_string = "(#{attributes.join(', ')}) " end "Docker::Event { #{time} #{status} #{id} #{attribute_string}}" end def to_s_actor_style most_accurate_time = time_nano || time attributes = [] actor.attributes.each do |attribute, value| attributes << "#{attribute}=#{value}" end unless attributes.empty? attribute_string = "(#{attributes.join(', ')}) " end "Docker::Event { #{most_accurate_time} #{type} #{action} #{actor.id} #{attribute_string}}" end end docker-api-2.4.0/lib/docker/version.rb0000644000004100000410000000015414742317714017624 0ustar www-datawww-data# frozen_string_literal: true module Docker # The version of the docker-api gem. 
VERSION = '2.4.0' end docker-api-2.4.0/lib/docker/volume.rb0000644000004100000410000000230214742317714017443 0ustar www-datawww-data# frozen_string_literal: true # class represents a Docker Volume class Docker::Volume include Docker::Base # /volumes/volume_name doesnt return anything def remove(opts = {}, conn = Docker.connection) conn.delete("/volumes/#{id}") end def normalize_hash(hash) hash['id'] ||= hash['Name'] end class << self # get details for a single volume def get(name, conn = Docker.connection) resp = conn.get("/volumes/#{name}") hash = Docker::Util.parse_json(resp) || {} new(conn, hash) end # /volumes endpoint returns an array of hashes incapsulated in an Volumes tag def all(opts = {}, conn = Docker.connection) resp = conn.get('/volumes') json = Docker::Util.parse_json(resp) || {} hashes = json['Volumes'] || [] hashes.map { |hash| new(conn, hash) } end # creates a volume with an arbitrary name def create(name, opts = {}, conn = Docker.connection) opts['Name'] = name resp = conn.post('/volumes/create', {}, body: MultiJson.dump(opts)) hash = Docker::Util.parse_json(resp) || {} new(conn, hash) end def prune(conn = Docker.connection) conn.post("/volumes/prune") end end end docker-api-2.4.0/lib/docker/connection.rb0000644000004100000410000001153514742317714020303 0ustar www-datawww-data# frozen_string_literal: true # This class represents a Connection to a Docker server. The Connection is # immutable in that once the url and options is set they cannot be changed. class Docker::Connection require 'docker/util' require 'docker/error' include Docker::Error attr_reader :url, :options # Create a new Connection. This method takes a url (String) and options # (Hash). These are passed to Excon, so any options valid for `Excon.new` # can be passed here. def initialize(url, opts) case when !url.is_a?(String) raise ArgumentError, "Expected a String, got: '#{url}'" when !opts.is_a?(Hash) raise ArgumentError, "Expected a Hash, got: '#{opts}'" else uri = URI.parse(url) if uri.scheme == "unix" @url, @options = 'unix:///', {:socket => uri.path}.merge(opts) elsif uri.scheme =~ /^(https?|tcp)$/ @url, @options = url, opts else @url, @options = "http://#{uri}", opts end end end # The actual client that sends HTTP methods to the Docker server. This value # is not cached, since doing so may cause socket errors after bad requests. def resource Excon.new(url, options) end private :resource # Send a request to the server with the ` def request(*args, &block) retries ||= 0 request = compile_request_params(*args, &block) log_request(request) begin resource.request(request).body rescue Excon::Errors::BadRequest => ex if retries < 2 response_cause = '' begin response_cause = JSON.parse(ex.response.body)['cause'] rescue JSON::ParserError #noop end if response_cause.is_a?(String) # The error message will tell the application type given and then the # application type that the message should be # # This is not perfect since it relies on processing a message that # could change in the future. However, it should be a good stop-gap # until all methods are updated to pass in the appropriate content # type. # # A current example message is: # * 'Content-Type: application/json is not supported. 
Should be "application/x-tar"' matches = response_cause.delete('"\'').scan(%r{(application/\S+)}) unless matches.count < 2 Docker.logger.warn( <<~RETRY_WARNING Automatically retrying with content type '#{response_cause}' Original Error: #{ex} RETRY_WARNING ) if Docker.logger request[:headers]['Content-Type'] = matches.last.first retries += 1 retry end end end raise ClientError, ex.response.body rescue Excon::Errors::Unauthorized => ex raise UnauthorizedError, ex.response.body rescue Excon::Errors::NotFound => ex raise NotFoundError, ex.response.body rescue Excon::Errors::Conflict => ex raise ConflictError, ex.response.body rescue Excon::Errors::InternalServerError => ex raise ServerError, ex.response.body rescue Excon::Errors::Timeout => ex raise TimeoutError, ex.message end end def log_request(request) if Docker.logger Docker.logger.debug( [request[:method], request[:path], request[:query], request[:body]] ) end end def to_s "Docker::Connection { :url => #{url}, :options => #{options} }" end # Delegate all HTTP methods to the #request. [:get, :put, :post, :delete].each do |method| define_method(method) { |*args, &block| request(method, *args, &block) } end # Common attribute requests def info Docker::Util.parse_json(get('/info')) end def ping get('/_ping') end def podman? @podman ||= !( Array(version['Components']).find do |component| component['Name'].include?('Podman') end ).nil? end def rootless? @rootless ||= (info['Rootless'] == true) end def version @version ||= Docker::Util.parse_json(get('/version')) end private # Given an HTTP method, path, optional query, extra options, and block, # compiles a request. def compile_request_params(http_method, path, query = nil, opts = nil, &block) query ||= {} opts ||= {} headers = opts.delete(:headers) || {} content_type = opts[:body].nil? ? 'text/plain' : 'application/json' user_agent = "Swipely/Docker-API #{Docker::VERSION}" { :method => http_method, :path => path, :query => query, :headers => { 'Content-Type' => content_type, 'User-Agent' => user_agent, }.merge(headers), :expects => (200..204).to_a << 301 << 304, :idempotent => http_method == :get, :request_block => block, }.merge(opts).reject { |_, v| v.nil? } end end docker-api-2.4.0/lib/excon/0000755000004100000410000000000014742317714015457 5ustar www-datawww-datadocker-api-2.4.0/lib/excon/middlewares/0000755000004100000410000000000014742317714017757 5ustar www-datawww-datadocker-api-2.4.0/lib/excon/middlewares/hijack.rb0000644000004100000410000000331214742317714021534 0ustar www-datawww-data# frozen_string_literal: true module Excon module Middleware # Hijack is an Excon middleware which parses response headers and then # yields the underlying TCP socket for raw TCP communication (used to # attach to STDIN of containers). class Hijack < Base def self.valid_parameter_keys [:hijack_block].freeze end def build_response(status, socket) response = { :body => '', :headers => Excon::Headers.new, :status => status, :remote_ip => socket.respond_to?(:remote_ip) && socket.remote_ip, } if socket.data[:scheme] =~ /^(https?|tcp)$/ response.merge({ :local_port => socket.respond_to?(:local_port) && socket.local_port, :local_address => socket.respond_to?(:local_address) && socket.local_address }) end response end def response_call(datum) if datum[:hijack_block] # Need to process the response headers here rather than in # Excon::Middleware::ResponseParser as the response parser will # block trying to read the body. socket = datum[:connection].send(:socket) # c.f. 
Excon::Response.parse until match = /^HTTP\/\d+\.\d+\s(\d{3})\s/.match(socket.readline); end status = match[1].to_i datum[:response] = build_response(status, socket) Excon::Response.parse_headers(socket, datum) datum[:hijack_block].call socket.instance_variable_get(:@socket) end @stack.response_call(datum) end end end end docker-api-2.4.0/lib/docker-api.rb0000644000004100000410000000006014742317714016702 0ustar www-datawww-data# frozen_string_literal: true require 'docker' docker-api-2.4.0/docker-api.gemspec0000644000004100000410000000514714742317714017167 0ustar www-datawww-data######################################################### # This file has been automatically generated by gem2tgz # ######################################################### # -*- encoding: utf-8 -*- # stub: docker-api 2.4.0 ruby lib Gem::Specification.new do |s| s.name = "docker-api".freeze s.version = "2.4.0" s.required_rubygems_version = Gem::Requirement.new(">= 0".freeze) if s.respond_to? :required_rubygems_version= s.require_paths = ["lib".freeze] s.authors = ["Swipely, Inc.".freeze] s.date = "2024-10-30" s.description = "A simple REST client for the Docker Remote API".freeze s.email = "tomhulihan@swipely.com bright@swipely.com toddlunter@swipely.com".freeze s.files = ["LICENSE".freeze, "README.md".freeze, "lib/docker-api.rb".freeze, "lib/docker.rb".freeze, "lib/docker/base.rb".freeze, "lib/docker/connection.rb".freeze, "lib/docker/container.rb".freeze, "lib/docker/error.rb".freeze, "lib/docker/event.rb".freeze, "lib/docker/exec.rb".freeze, "lib/docker/image.rb".freeze, "lib/docker/messages.rb".freeze, "lib/docker/messages_stack.rb".freeze, "lib/docker/network.rb".freeze, "lib/docker/rake_task.rb".freeze, "lib/docker/util.rb".freeze, "lib/docker/version.rb".freeze, "lib/docker/volume.rb".freeze, "lib/excon/middlewares/hijack.rb".freeze] s.homepage = "https://github.com/upserve/docker-api".freeze s.licenses = ["MIT".freeze] s.rubygems_version = "3.3.15".freeze s.summary = "A simple REST client for the Docker Remote API".freeze if s.respond_to? :specification_version then s.specification_version = 4 end if s.respond_to? :add_runtime_dependency then s.add_runtime_dependency(%q.freeze, [">= 0.64.0"]) s.add_runtime_dependency(%q.freeze, [">= 0"]) s.add_development_dependency(%q.freeze, [">= 0"]) s.add_development_dependency(%q.freeze, [">= 0"]) s.add_development_dependency(%q.freeze, [">= 0"]) s.add_development_dependency(%q.freeze, ["~> 3.0"]) s.add_development_dependency(%q.freeze, [">= 0"]) s.add_development_dependency(%q.freeze, [">= 0"]) s.add_development_dependency(%q.freeze, [">= 0"]) else s.add_dependency(%q.freeze, [">= 0.64.0"]) s.add_dependency(%q.freeze, [">= 0"]) s.add_dependency(%q.freeze, [">= 0"]) s.add_dependency(%q.freeze, [">= 0"]) s.add_dependency(%q.freeze, [">= 0"]) s.add_dependency(%q.freeze, ["~> 3.0"]) s.add_dependency(%q.freeze, [">= 0"]) s.add_dependency(%q.freeze, [">= 0"]) s.add_dependency(%q.freeze, [">= 0"]) end end docker-api-2.4.0/LICENSE0000644000004100000410000000207114742317714014602 0ustar www-datawww-dataThe MIT License (MIT) Copyright (c) 2014 Swipely, Inc. 
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. docker-api-2.4.0/README.md0000644000004100000410000007032014742317714015056 0ustar www-datawww-datadocker-api ========== [![Gem Version](https://badge.fury.io/rb/docker-api.svg)](https://badge.fury.io/rb/docker-api) [![Code Climate](https://codeclimate.com/github/upserve/docker-api.svg)](https://codeclimate.com/github/upserve/docker-api) This gem provides an object-oriented interface to the [Docker Engine API](https://docs.docker.com/develop/sdk/). Every method listed there is implemented. At the time of this writing, docker-api is meant to interface with Docker version 1.4.* If you're interested in using Docker to package your apps, we recommend the [dockly](https://github.com/upserve/dockly) gem. Dockly provides a simple DSL for describing Docker containers that install as Debian packages and are controlled by upstart scripts. Installation ------------ Add this line to your application's Gemfile: ```ruby gem 'docker-api' ``` And then run: ```shell $ bundle install ``` Alternatively, if you wish to just use the gem in a script, you can run: ```shell $ gem install docker-api ``` Finally, just add `require 'docker'` to the top of the file using this gem. Usage ----- docker-api is designed to be very lightweight. Almost no state is cached (aside from id's which are immutable) to ensure that each method call's information is up to date. As such, just about every external method represents an API call. At this time, basic `podman` support has been added via the podman docker-compatible API socket. ## Starting up Follow the [installation instructions](https://docs.docker.com/install/), and then run: ```shell $ sudo docker -d ``` This will daemonize Docker so that it can be used for the remote API calls. ### Host If you're running Docker locally as a socket, there is no setup to do in Ruby. If you're not using a socket or have changed the path of the socket, you'll have to point the gem to your socket or local/remote port. For example: ```ruby Docker.url = 'tcp://example.com:5422' ``` Two things to note here. The first is that this gem uses [excon](https://github.com/excon/excon), so any of the options that are valid for `Excon.new` are also valid for `Docker.options`. Second, by default Docker runs on a socket. The gem will assume you want to connect to the socket unless you specify otherwise. Also, you may set the above variables via `ENV` variables. 
For example, the URL can be picked up from the environment:

```shell
$ DOCKER_URL=unix:///var/docker.sock irb
irb(main):001:0> require 'docker'
=> true
irb(main):002:0> Docker.url
=> "unix:///var/docker.sock"
irb(main):003:0> Docker.options
=> {}
```

```shell
$ DOCKER_URL=tcp://example.com:1000 irb
irb(main):001:0> require 'docker'
=> true
irb(main):003:0> Docker.url
=> "tcp://example.com:1000"
irb(main):004:0> Docker.options
=> {}
```

### SSL

When running docker using SSL, setting the DOCKER_CERT_PATH will configure docker-api to use SSL. The cert path is a folder that contains the cert, key and cacert files. docker-api expects the files to be named: cert.pem, key.pem, and ca.pem. If your files are named differently, you'll want to set your options explicitly:

```
Docker.options = {
  client_cert: File.join(cert_path, 'cert.pem'),
  client_key: File.join(cert_path, 'key.pem'),
  ssl_ca_file: File.join(cert_path, 'ca.pem'),
  scheme: 'https'
}
```

If you want to load the cert files from a variable, e.g. you want to load them from ENV as needed on Heroku:

```
cert_store = OpenSSL::X509::Store.new
certificate = OpenSSL::X509::Certificate.new ENV["DOCKER_CA"]
cert_store.add_cert certificate
Docker.options = {
  client_cert_data: ENV["DOCKER_CERT"],
  client_key_data: ENV["DOCKER_KEY"],
  ssl_cert_store: cert_store,
  scheme: 'https'
}
```

If you need to disable SSL verification, set the DOCKER_SSL_VERIFY variable to 'false'.

## Global calls

All of the following examples require a connection to a Docker server. See the Starting up section above for more information.

```ruby
require 'docker'
# => true

# docker command for reference: docker version
Docker.version
# => { 'Version' => '0.5.2', 'GoVersion' => 'go1.1' }

# docker command for reference: docker info
Docker.info
# => { "Debug" => false, "Containers" => 187, "Images" => 196, "NFd" => 10, "NGoroutines" => 9, "MemoryLimit" => true }

# docker command for reference: docker login
Docker.authenticate!('username' => 'docker-fan-boi', 'password' => 'i<3docker', 'email' => 'dockerboy22@aol.com')
# => true

# docker command for reference: docker login registry.gitlab.com
Docker.authenticate!('username' => 'docker-fan-boi', 'password' => 'i<3docker', 'email' => 'dockerboy22@aol.com', 'serveraddress' => 'https://registry.gitlab.com/v1/')
# => true
```

## Images

Just about every method here has a one-to-one mapping with the [Images](https://docs.docker.com/engine/reference/api/docker_remote_api_v1.14/#2-2-images) section of the API. If an API call accepts query parameters, these can be passed as a Hash to its corresponding method. Also, note that `Docker::Image.new` is a private method, so you must use `.create`, `.build`, `.build_from_dir`, `.build_from_tar`, or `.import` to make an instance.

```ruby
require 'docker'
# => true

# Pull an Image.
# docker command for reference: docker pull ubuntu:14.04
image = Docker::Image.create('fromImage' => 'ubuntu:14.04')
# => Docker::Image { :id => ae7ffbcd1, :connection => Docker::Connection { :url => tcp://localhost, :options => {:port=>2375} } }

# Insert a local file into an Image.
image.insert_local('localPath' => 'Gemfile', 'outputPath' => '/')
# => Docker::Image { :id => 682ea192f, :connection => Docker::Connection { :url => tcp://localhost, :options => {:port=>2375} } }

# Insert multiple local files into an Image.
image.insert_local('localPath' => [ 'Gemfile', 'Rakefile' ], 'outputPath' => '/')
# => Docker::Image { :id => eb693ec80, :connection => Docker::Connection { :url => tcp://localhost, :options => {:port=>2375} } }

# Add a repo name to Image.
# docker command for reference: docker tag base2 image.tag('repo' => 'base2', 'force' => true) # => ["base2"] # Add a repo name and tag an Image. # docker command for reference: docker tag base2:latest image.tag('repo' => 'base2', 'tag' => 'latest', force: true) # => ["base2:latest"] # Get more information about the Image. # docker command for reference: docker inspect image.json # => {"id"=>"67859327bf22ef8b5b9b4a6781f72b2015acd894fa03ce07e0db7af170ba468c", "comment"=>"Imported from -", "created"=>"2013-06-19T18:42:58.287944526-04:00", "container_config"=>{"Hostname"=>"", "User"=>"", "Memory"=>0, "MemorySwap"=>0, "CpuShares"=>0, "AttachStdin"=>false, "AttachStdout"=>false, "AttachStderr"=>false, "PortSpecs"=>nil, "Tty"=>false, "OpenStdin"=>false, "StdinOnce"=>false, "Env"=>nil, "Cmd"=>nil, "Dns"=>nil, "Image"=>"", "Volumes"=>nil, "VolumesFrom"=>""}, "docker_version"=>"0.4.0", "architecture"=>"x86_64"} # View the history of the Image. image.history # => [{"Id"=>"67859327bf22", "Created"=>1371681778}] # Push the Image to the Docker registry. Note that you have to login using # `Docker.authenticate!` and tag the Image first. # docker command for reference: docker push image.push # => Docker::Image { @connection => Docker::Connection { :url => tcp://localhost, :options => {:port=>2375} }, @info = { "id" => eb693ec80, "RepoTags" => ["base2", "base2/latest"]} } # Push individual tag to the Docker registry. image.push(nil, tag: "tag_name") image.push(nil, repo_tag: 'registry/repo_name:tag_name') # Given a command, create a new Container to run that command in the Image. # docker command for reference: docker run -ti ls -l image.run('ls -l') # => Docker::Container { id => aaef712eda, :connection => Docker::Connection { :url => tcp://localhost, :options => {:port=>2375} } } # Remove the Image from the server. # docker command for reference: docker rmi -f image.remove(:force => true) # => true # Export a single Docker Image to a file # docker command for reference: docker save my_export.tar image.save('my_export.tar') # => Docker::Image { :id => 66b712aef, :connection => Docker::Connection { :url => tcp://localhost, :options => {:port=>2375} } } # Return the raw image binary data image.save # => "abiglongbinarystring" # Stream the contents of the image to a block: image.save_stream { |chunk| puts chunk } # => nil # Given a Container's export, creates a new Image. # docker command for reference: docker import some-export.tar Docker::Image.import('some-export.tar') # => Docker::Image { :id => 66b712aef, :connection => Docker::Connection { :url => tcp://localhost, :options => {:port=>2375} } } # `Docker::Image.import` can also import from a URI Docker::Image.import('http://some-site.net/my-image.tar') # => Docker::Image { :id => 6b462b2d2, :connection => Docker::Connection { :url => tcp://localhost, :options => {:port=>2375} } } # For a lower-level interface for importing tars, `Docker::Image.import_stream` may be used. # It accepts a block, and will call that block until it returns an empty `String`. File.open('my-export.tar') do |file| Docker::Image.import_stream { file.read(1000).to_s } end # Create an Image from a Dockerfile as a String. Docker::Image.build("from base\nrun touch /test") # => Docker::Image { :id => b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc, :connection => Docker::Connection { :url => tcp://localhost, :options => {:port=>2375} } } # Create an Image from a Dockerfile. # docker command for reference: docker build . 
Docker::Image.build_from_dir('.') # => Docker::Image { :id => 1266dc19e, :connection => Docker::Connection { :url => tcp://localhost, :options => {:port=>2375} } } # Create an Image from a file other than Dockerfile. # docker command for reference: docker build -f Dockerfile.Centos . Docker::Image.build_from_dir('.', { 'dockerfile' => 'Dockerfile.Centos' }) # => Docker::Image { :id => 1266dc19e, :connection => Docker::Connection { :url => tcp://localhost, :options => {:port=>2375} } } # Create an Image from a Dockerfile and stream the logs Docker::Image.build_from_dir('.') do |v| if (log = JSON.parse(v)) && log.has_key?("stream") $stdout.puts log["stream"] end end # Create an Image from a tar file. # docker command for reference: docker build - < docker_image.tar Docker::Image.build_from_tar(File.open('docker_image.tar', 'r')) # => Docker::Image { :id => 1266dc19e, :connection => Docker::Connection { :url => tcp://localhost, :options => {:port=>2375} } } # Load all Images on your Docker server. # docker command for reference: docker images Docker::Image.all # => [Docker::Image { :id => b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc, :connection => Docker::Connection { :url => tcp://localhost, :options => {:port=>2375} } }, Docker::Image { :id => 8dbd9e392a964056420e5d58ca5cc376ef18e2de93b5cc90e868a1bbc8318c1c, :connection => Docker::Connection { :url => tcp://localhost, :options => {:port=>2375} } }] # Get Image from the server, with id # docker command for reference: docker images Docker::Image.get('df4f1bdecf40') # => Docker::Image { :id => eb693ec80, :connection => Docker::Connection { :url => tcp://localhost, :options => {:port=>2375} } } # Check if an image with a given id exists on the server. Docker::Image.exist?('ef723dcdac09') # => true # Load an image from the file system Docker::Image.load('./my-image.tar') # => "" # An IO object may also be specified for loading File.open('./my-image.tar', 'rb') do |file| Docker::Image.load(file) end # => "" # Export multiple images to a single tarball # docker command for reference: docker save my_image1 my_image2:not_latest > my_export.tar names = %w( my_image1 my_image2:not_latest ) Docker::Image.save(names, 'my_export.tar') # => nil # Return the raw image binary data names = %w( my_image1 my_image2:not_latest ) Docker::Image.save(names) # => "abiglongbinarystring" # Stream the raw binary data names = %w( my_image1 my_image2:not_latest ) Docker::Image.save_stream(names) { |chunk| puts chunk } # => nil # Search the Docker registry. 
# docker command for reference: docker search sshd Docker::Image.search('term' => 'sshd') # => [Docker::Image { :id => cespare/sshd, :connection => Docker::Connection { :url => tcp://localhost, :options => {:port=>2375} } }, Docker::Image { :id => johnfuller/sshd, :connection => Docker::Connection { :url => tcp://localhost, :options => {:port=>2375} } }, Docker::Image { :id => dhrp/mongodb-sshd, :connection => Docker::Connection { :url => tcp://localhost, :options => {:port=>2375} } }, Docker::Image { :id => rayang2004/sshd, :connection => Docker::Connection { :url => tcp://localhost, :options => {:port=>2375} } }, Docker::Image { :id => dhrp/sshd, :connection => Docker::Connection { :url => tcp://localhost, :options => {:port=>2375} } }, Docker::Image { :id => toorop/daemontools-sshd, :connection => Docker::Connection { :url => tcp://localhost, :options => {:port=>2375} } }, Docker::Image { :id => toorop/daemontools-sshd-nginx, :connection => Docker::Connection { :url => tcp://localhost, :options => {:port=>2375} } }, Docker::Image { :id => toorop/daemontools-sshd-nginx-php-fpm, :connection => Docker::Connection { :url => tcp://localhost, :options => {:port=>2375} } }, Docker::Image { :id => mbkan/lamp, :connection => Docker::Connection { :url => tcp://localhost, :options => {:port=>2375} } }, Docker::Image { :id => toorop/golang, :connection => Docker::Connection { :url => tcp://localhost, :options => {:port=>2375} } }, Docker::Image { :id => wma55/u1210sshd, :connection => Docker::Connection { :url => tcp://localhost, :options => {:port=>2375} } }, Docker::Image { :id => jdswinbank/sshd, :connection => Docker::Connection { :url => tcp://localhost, :options => {:port=>2375} } }, Docker::Image { :id => vgauthier/sshd, :connection => Docker::Connection { :url => tcp://localhost, :options => {:port=>2375} } }] ``` ## Containers Much like the Images, this object also has a one-to-one mapping with the [Containers](https://docs.docker.com/engine/reference/api/docker_remote_api_v1.14/#2-1-containers) section of the API. Also like Images, `.new` is a private method, so you must use `.create` to make an instance. ```ruby require 'docker' # Create a Container. container = Docker::Container.create('Cmd' => ['ls'], 'Image' => 'base') # => Docker::Container { :id => 492510dd38e4, :connection => Docker::Connection { :url => tcp://localhost, :options => {:port=>2375} } } # Get more information about the Container. container.json # => {"ID"=>"492510dd38e4da7703f36dfccd013de672b8250f57f59d1555ced647766b5e82", "Created"=>"2013-06-20T10:46:02.897548-04:00", "Path"=>"ls", "Args"=>[], "Config"=>{"Hostname"=>"492510dd38e4", "User"=>"", "Memory"=>0, "MemorySwap"=>0, "CpuShares"=>0, "AttachStdin"=>false, "AttachStdout"=>false, "AttachStderr"=>false, "PortSpecs"=>nil, "Tty"=>false, "OpenStdin"=>false, "StdinOnce"=>false, "Env"=>nil, "Cmd"=>["ls"], "Dns"=>nil, "Image"=>"base", "Volumes"=>nil, "VolumesFrom"=>""}, "State"=>{"Running"=>false, "Pid"=>0, "ExitCode"=>0, "StartedAt"=>"0001-01-01T00:00:00Z", "Ghost"=>false}, "Image"=>"b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc", "NetworkSettings"=>{"IpAddress"=>"", "IpPrefixLen"=>0, "Gateway"=>"", "Bridge"=>"", "PortMapping"=>nil}, "SysInitPath"=>"/usr/bin/docker", "ResolvConfPath"=>"/etc/resolv.conf", "Volumes"=>nil} # Start running the Container. container.start # => Docker::Container { :id => 492510dd38e4, :connection => Docker::Connection { :url => tcp://localhost, :options => {:port=>2375} } } # Stop running the Container. 
container.stop # => Docker::Container { :id => 492510dd38e4, :connection => Docker::Connection { :url => tcp://localhost, :options => {:port=>2375} } } # Restart the Container. container.restart # => Docker::Container { :id => 492510dd38e4, :connection => Docker::Connection { :url => tcp://localhost, :options => {:port=>2375} } } # Pause the running Container processes. container.pause # => Docker::Container { :id => 492510dd38e4, :connection => Docker::Connection { :url => tcp://localhost, :options => {:port=>2375} } } # Unpause the running Container processes. container.unpause # => Docker::Container { :id => 492510dd38e4, :connection => Docker::Connection { :url => tcp://localhost, :options => {:port=>2375} } } # Kill the command running in the Container. container.kill # => Docker::Container { :id => 492510dd38e4, :connection => Docker::Connection { :url => tcp://localhost, :options => {:port=>2375} } } # Kill the Container specifying the kill signal. container.kill(:signal => "SIGHUP") # => Docker::Container { :id => 492510dd38e4, :connection => Docker::Connection { :url => tcp://localhost, :options => {:port=>2375} } } # Return the currently executing processes in a Container. container.top # => [{"PID"=>"4851", "TTY"=>"pts/0", "TIME"=>"00:00:00", "CMD"=>"lxc-start"}] # Same as above, but uses the original format container.top(format: :hash) # => { # "Titles" => ["PID", "TTY", "TIME", "CMD"], # "Processes" => [["4851", "pts/0", "00:00:00", "lxc-start"]] # } # To expose 1234 to bridge # In Dockerfile: EXPOSE 1234/tcp # docker run resulting-image-name Docker::Container.create( 'Image' => 'image-name', 'HostConfig' => { 'PortBindings' => { '1234/tcp' => [{}] } } ) # To expose 1234 to host with any port # docker run -p 1234 image-name Docker::Container.create( 'Image' => 'image-name', 'ExposedPorts' => { '1234/tcp' => {} }, 'HostConfig' => { 'PortBindings' => { '1234/tcp' => [{}] } } ) # To expose 1234 to host with a specified host port # docker run -p 1234:1234 image-name Docker::Container.create( 'Image' => 'image-name', 'ExposedPorts' => { '1234/tcp' => {} }, 'HostConfig' => { 'PortBindings' => { '1234/tcp' => [{ 'HostPort' => '1234' }] } } ) # To expose 1234 to host with a specified host port and host IP # docker run -p 192.168.99.100:1234:1234 image-name Docker::Container.create( 'Image' => 'image-name', 'ExposedPorts' => { '1234/tcp' => {} }, 'HostConfig' => { 'PortBindings' => { '1234/tcp' => [{ 'HostPort' => '1234', 'HostIp' => '192.168.99.100' }] } } ) # To set container name pass `name` key to options Docker::Container.create( 'name' => 'my-new-container', 'Image' => 'image-name' ) # Stores a file with the given content in the container container.store_file("/test", "Hello world") # Reads a file from the container container.read_file("/test") # => "Hello world" # Export a Container. Since an export is typically at least 300M, chunks of the # export are yielded instead of just returning the whole thing. File.open('export.tar', 'w') do |file| container.export { |chunk| file.write(chunk) } end # => nil # Inspect a Container's changes to the file system. container.changes # => [{'Path'=>'/dev', 'Kind'=>0}, {'Path'=>'/dev/kmsg', 'Kind'=>1}] # Copy files/directories from the Container. Note that these are exported as tars. 
container.copy('/etc/hosts') { |chunk| puts chunk } hosts0000644000000000000000000000023412100405636007023 0ustar 127.0.0.1 localhost ::1 localhost ip6-localhost ip6-loopback fe00::0 ip6-localnet ff00::0 ip6-mcastprefix ff02::1 ip6-allnodes ff02::2 ip6-allrouters # => Docker::Container { :id => a1759f3e2873, :connection => Docker::Connection { :url => tcp://localhost, :options => {:port=>2375} } } # Wait for the current command to finish executing. If an argument is given, # will timeout after that number of seconds. The default is one minute. container.wait(15) # => {'StatusCode'=>0} # Attach to the Container. Currently, the below options are the only valid ones. # By default, :stream, :stdout, and :stderr are set. container.attach(:stream => true, :stdin => nil, :stdout => true, :stderr => true, :logs => true, :tty => false) # => [["bin\nboot\ndev\netc\nhome\nlib\nlib64\nmedia\nmnt\nopt\nproc\nroot\nrun\nsbin\nselinux\nsrv\nsys\ntmp\nusr\nvar", []] # If you wish to stream the attach method, a block may be supplied. container = Docker::Container.create('Image' => 'base', 'Cmd' => ['find / -name *']) container.tap(&:start).attach { |stream, chunk| puts "#{stream}: #{chunk}" } stderr: 2013/10/30 17:16:24 Unable to locate find / -name * # => [[], ["2013/10/30 17:16:24 Unable to locate find / -name *\n"]] # If you want to attach to stdin of the container, supply an IO-like object: container = Docker::Container.create('Image' => 'base', 'Cmd' => ['cat'], 'OpenStdin' => true, 'StdinOnce' => true) container.tap(&:start).attach(stdin: StringIO.new("foo\nbar\n")) # => [["foo\nbar\n"], []] # Similar to the stdout/stderr attach method, there is logs and streaming_logs # logs will only return after the container has exited. The output will be the raw output from the logs stream. 
# streaming_logs will collect the messages out of the multiplexed form and also execute a block on each line that comes in (the block takes a stream and a chunk as arguments)

# Raw logs from a TTY-enabled container after exit
container.logs(stdout: true)
# => "\e]0;root@8866c76564e8: /\aroot@8866c76564e8:/# echo 'i\b \bdocker-api'\r\ndocker-api\r\n\e]0;root@8866c76564e8: /\aroot@8866c76564e8:/# exit\r\n"

# Logs from a non-TTY container with multiplex prefix
container.logs(stdout: true)
# => "\u0001\u0000\u0000\u0000\u0000\u0000\u0000\u00021\n\u0001\u0000\u0000\u0000\u0000\u0000\u0000\u00022\n\u0001\u0000\u0000\u0000\u0000\u0000\u0000\u00023\n\u0001\u0000\u0000\u0000\u0000\u0000\u0000\u00024\n\u0001\u0000\u0000\u0000\u0000\u0000\u0000\u00025\n\u0001\u0000\u0000\u0000\u0000\u0000\u0000\u00026\n\u0001\u0000\u0000\u0000\u0000\u0000\u0000\u00027\n\u0001\u0000\u0000\u0000\u0000\u0000\u0000\u00028\n\u0001\u0000\u0000\u0000\u0000\u0000\u0000\u00029\n\u0001\u0000\u0000\u0000\u0000\u0000\u0000\u000310\n"

# Streaming logs from a non-TTY container, removing the multiplex prefix, with a block printing out each line (a block is not possible with Container#logs)
container.streaming_logs(stdout: true) { |stream, chunk| puts "#{stream}: #{chunk}" }
stdout: 1
stdout: 2
stdout: 3
stdout: 4
stdout: 5
stdout: 6
stdout: 7
stdout: 8
stdout: 9
stdout: 10
# => "1\n\n2\n\n3\n\n4\n\n5\n\n6\n\n7\n\n8\n\n9\n\n10\n"

# If the container has TTY enabled, set `tty => true` to get the raw stream:
command = ["bash", "-c", "if [ -t 1 ]; then echo -n \"I'm a TTY!\"; fi"]
container = Docker::Container.create('Image' => 'ubuntu', 'Cmd' => command, 'Tty' => true)
container.tap(&:start).attach(:tty => true)
# => [["I'm a TTY!"], []]

# Obtaining the current statistics of a container
container.stats
# => {"read"=>"2016-02-29T20:47:05.221608695Z", "precpu_stats"=>{"cpu_usage"=> ... }

# Create an Image from a Container's changes.
container.commit
# => Docker::Image { :id => eaeb8d00efdf, :connection => Docker::Connection { :url => tcp://localhost, :options => {:port=>2375} } }

# Commit the Container and run a new command. The second argument is the number
# of seconds the Container should wait before stopping its current command.
container.run('pwd', 10)
# => Docker::Image { :id => 4427be4199ac, :connection => Docker::Connection { :url => tcp://localhost, :options => {:port=>2375} } }

# Run an Exec instance inside the container and capture its output and exit status
container.exec(['date'])
# => [["Wed Nov 26 11:10:30 CST 2014\n"], [], 0]

# Launch an Exec instance without capturing its output or status
container.exec(['./my_service'], detach: true)
# => Docker::Exec { :id => be4eaeb8d28a, :connection => Docker::Connection { :url => tcp://localhost, :options => {:port=>2375} } }

# Parse the output of an Exec instance
container.exec(['find', '/', '-name *']) { |stream, chunk| puts "#{stream}: #{chunk}" }
stderr: 2013/10/30 17:16:24 Unable to locate find / -name *
# => [[], ["2013/10/30 17:16:24 Unable to locate find / -name *\n"], 1]

# Run an Exec instance, grabbing only the STDOUT output
container.exec(['date'], stderr: false)
# => [["Wed Nov 26 11:10:30 CST 2014\n"], [], 0]

# Pass input to an Exec instance command via Stdin
container.exec(['cat'], stdin: StringIO.new("foo\nbar\n"))
# => [["foo\nbar\n"], [], 0]

# Get the raw stream of data from an Exec instance
command = ["bash", "-c", "if [ -t 1 ]; then echo -n \"I'm a TTY!\"; fi"]
container.exec(command, tty: true)
# => [["I'm a TTY!"], [], 0]
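# The :user option is forwarded to the Exec create call, so a command can be run
# as a different user inside the container. (Illustrative sketch, not from the
# original README; the user name and output shown are assumptions.)
container.exec(['whoami'], user: 'www-data')
# => [["www-data\n"], [], 0]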
# Wait for the current command to finish executing. If an argument is given,
# it will time out after that number of seconds. The default is one minute.
command = ["bash", "-c", "if [ -t 1 ]; then echo -n \"Set max seconds for exec!!\"; fi"]
container.exec(command, wait: 120)
# => [["Set max seconds for exec!"], [], 0]

# Delete a Container.
container.delete(:force => true)
# => nil

# Update the container.
container.update("CpuShares" => 50000)

# Request a Container by ID or name.
Docker::Container.get('500f53b25e6e')
# => Docker::Container { :id => , :connection => Docker::Connection { :url => tcp://localhost, :options => {:port=>2375} } }

# Request all of the Containers. By default, will only return the running Containers.
Docker::Container.all(:all => true)
# => [Docker::Container { :id => , :connection => Docker::Connection { :url => tcp://localhost, :options => {:port=>2375} } }]
```

## JSON encoded values

For JSON encoded values, nothing is done implicitly, meaning you need to explicitly call `to_json` on your parameter before the call. For example, to request all of the Containers using a filter:

```ruby
require 'docker'

# Request all of the Containers, filtering by status exited.
Docker::Container.all(all: true, filters: { status: ["exited"] }.to_json)
# => [Docker::Container { :id => , :connection => Docker::Connection { :url => tcp://localhost, :options => {:port=>2375} } }]

# Request all of the Containers, filtering by the label label_name.
Docker::Container.all(all: true, filters: { label: [ "label_name" ] }.to_json)
# => [Docker::Container { :id => , :connection => Docker::Connection { :url => tcp://localhost, :options => {:port=>2375} } }]

# Request all of the Containers, filtering by the label label_name having the value label_value.
Docker::Container.all(all: true, filters: { label: [ "label_name=label_value" ] }.to_json)
# => [Docker::Container { :id => , :connection => Docker::Connection { :url => tcp://localhost, :options => {:port=>2375} } }]
```

This applies to all parameters that the Docker API requires to be JSON encoded.

## Events

```ruby
require 'docker'

# Act on a stream of events as they come in
Docker::Event.stream { |event| puts event; break }
Docker::Event { :status => create, :id => aeb8b55726df63bdd69d41e1b2650131d7ce32ca0d2fa5cbc75f24d0df34c7b0, :from => base:latest, :time => 1416958554 }
# => nil

# Act on all events after a given time (the block executes for all events up to the current time, then waits to execute on any new events that follow)
Docker::Event.since(1416958763) { |event| puts event; puts Time.now.to_i; break }
Docker::Event { :status => die, :id => 663005cdeb56f50177c395a817dbc8bdcfbdfbdaef329043b409ecb97fb68d7e, :from => base:latest, :time => 1416958764 }
1416959041
# => nil
```

These methods are prone to read timeouts. `Docker.options[:read_timeout]` will need to be set higher than 60 seconds if you expect long gaps between events.

## Connecting to Multiple Servers

By default, each object connects to the connection specified by `Docker.connection`. If you need to connect to multiple servers, you can do so by specifying the connection on `#new` or as an argument to the relevant class method. For example:

```ruby
require 'docker'
Docker::Container.all({}, Docker::Connection.new('tcp://example.com:2375', {}))
```
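Connections are ordinary objects, so several can be held at once and passed per call. A minimal sketch (both endpoints below are placeholders for your own daemons):

```ruby
require 'docker'

# Hypothetical endpoints -- substitute your own socket/host.
local  = Docker::Connection.new('unix:///var/run/docker.sock', {})
remote = Docker::Connection.new('tcp://example.com:2375', {})

Docker.version(local)                          # query the local daemon
Docker::Container.all({ all: true }, remote)   # list containers on the remote daemon
```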
## Rake Task

To create images through `rake`, a DSL task is provided. For example:

```ruby
require 'rake'
require 'docker'

image 'repo:tag' do
  image = Docker::Image.create('fromImage' => 'repo', 'tag' => 'old_tag')
  image = image.run('rm -rf /etc').commit
  image.tag('repo' => 'repo', 'tag' => 'tag')
end

image 'repo:new_tag' => 'repo:tag' do
  image = Docker::Image.create('fromImage' => 'repo', 'tag' => 'tag')
  image = image.insert_local('localPath' => 'some-file.tar.gz', 'outputPath' => '/')
  image.tag('repo' => 'repo', 'tag' => 'new_tag')
end
```

## Not supported (yet)

* Generating a tarball of images and metadata for a repository specified by a name: https://docs.docker.com/engine/reference/api/docker_remote_api_v1.14/#get-a-tarball-containing-all-images-and-tags-in-a-repository
* Loading a tarball generated from docker that contains all the images and metadata of a repository: https://docs.docker.com/engine/reference/api/docker_remote_api_v1.14/#load-a-tarball-with-a-set-of-images-and-tags-into-docker

License
-----

This program is licensed under the MIT license. See LICENSE for details.