pax_global_header00006660000000000000000000000064147715437050014527gustar00rootroot0000000000000052 comment=33df7695a71cb344eaccca2a090706217b95f9f6 vagrant_cloud-3.1.3/000077500000000000000000000000001477154370500143635ustar00rootroot00000000000000vagrant_cloud-3.1.3/.ci/000077500000000000000000000000001477154370500150345ustar00rootroot00000000000000vagrant_cloud-3.1.3/.ci/.ci-utility-files/000077500000000000000000000000001477154370500203065ustar00rootroot00000000000000vagrant_cloud-3.1.3/.ci/.ci-utility-files/common.sh000066400000000000000000003531771477154370500221520ustar00rootroot00000000000000#!/usr/bin/env bash # Copyright (c) HashiCorp, Inc. # SPDX-License-Identifier: MPL-2.0 # # shellcheck disable=SC2119 # shellcheck disable=SC2164 # If the bash version isn't at least 4, bail if [ "${BASH_VERSINFO:-0}" -lt "4" ]; then printf "ERROR: Expected bash version >= 4 (is: %d)" "${BASH_VERSINFO:-0}" exit 1 fi # Lets have some emojis WARNING_ICON="⚠️" ERROR_ICON="🛑" # Coloring # shellcheck disable=SC2034 TEXT_BOLD='\e[1m' TEXT_RED='\e[31m' # shellcheck disable=SC2034 TEXT_GREEN='\e[32m' TEXT_YELLOW='\e[33m' TEXT_CYAN='\e[36m' TEXT_CLEAR='\e[0m' # Common variables export full_sha="${GITHUB_SHA}" export short_sha="${full_sha:0:8}" export ident_ref="${GITHUB_REF#*/*/}" export repository="${GITHUB_REPOSITORY}" export repo_owner="${repository%/*}" export repo_name="${repository#*/}" # shellcheck disable=SC2153 export asset_cache="${ASSETS_PRIVATE_SHORTTERM}/${repository}/${GITHUB_ACTION}" export run_number="${GITHUB_RUN_NUMBER}" export run_id="${GITHUB_RUN_ID}" export job_id="${run_id}-${run_number}" readonly hc_releases_metadata_filename="release-meta.json" # This value is used in our cleanup trap to restore the value in cases # where a function call may have failed and did not restore it readonly _repository_backup="${repository}" if [ -z "${ci_bin_dir}" ]; then if ci_bin_dir="$(realpath ./.ci-bin)"; then export ci_bin_dir else echo "ERROR: Failed to create the local 
CI bin directory"
    exit 1
  fi
fi

# We are always noninteractive
export DEBIAN_FRONTEND=noninteractive

# If we are on a runner and debug mode is enabled,
# enable debug mode for ourselves too
if [ -n "${RUNNER_DEBUG}" ]; then
  DEBUG=1
fi

# If DEBUG is enabled and we are running tests,
# flag it so we can adjust where output is sent.
if [ -n "${DEBUG}" ] && [ -n "${BATS_TEST_FILENAME}" ]; then
  DEBUG_WITH_BATS=1
fi

# Write debug output to stderr. Message template
# and arguments are passed to `printf` for formatting.
#
# $1: message template
# $#: message arguments
#
# NOTE: Debug output is only displayed when DEBUG is set
function debug() {
  if [ -n "${DEBUG}" ]; then
    local msg_template="${1}"
    local i=$(( ${#} - 1 ))
    local msg_args=("${@:2:$i}")
    # FIX: msg was previously leaked into the global scope
    local msg

    # Update template to include caller information
    msg_template=$(printf "<%s(%s:%d)> %s" "${FUNCNAME[1]}" "${BASH_SOURCE[1]}" "${BASH_LINENO[0]}" "${msg_template}")
    #shellcheck disable=SC2059
    msg="$(printf "${msg_template}" "${msg_args[@]}")"

    # Under BATS, fd3 must be used so test output capture is not disturbed
    if [ -n "${DEBUG_WITH_BATS}" ]; then
      printf "%b%s%b\n" "${TEXT_CYAN}" "${msg}" "${TEXT_CLEAR}" >&3
    else
      printf "%b%s%b\n" "${TEXT_CYAN}" "${msg}" "${TEXT_CLEAR}" >&2
    fi
  fi
}

# Wrap the pushd command so we fail
# if the pushd command fails. Arguments
# are just passed through.
function pushd() {
  debug "executing 'pushd %s'" "${*}"
  command builtin pushd "${@}" > /dev/null || exit 1
}

# Wrap the popd command so we fail
# if the popd command fails. Arguments
# are just passed through.
# shellcheck disable=SC2120
function popd() {
  debug "executing 'popd %s'" "${*}"
  command builtin popd "${@}" || exit 1
}

# Wraps the aws CLI command to support
# role based access. It will check for
# expected environment variables when
# a role has been assumed. If they are
# not found, it will assume the configured
# role. If the role has already been
# assumed, it will check that the credentials
# have not timed out, and re-assume the
# role if so. If no role information is
# provided, it will just pass the command
# through directly
#
# NOTE: Required environment variable: AWS_ASSUME_ROLE_ARN
# NOTE: This was a wrapper for the AWS command that would properly
#       handle the assume role process and and automatically refresh
#       if close to expiry. With credentials being handled by the doormat
#       action now, this is no longer needed but remains in case it's
#       needed for some reason in the future.
function aws_deprecated() {
  # FIX: these working variables previously leaked into the global scope,
  # and `failed` was never reset so a prior failure poisoned later calls
  local aws_path expires_at aws_output
  local id key token expire
  local failed=""

  # Grab the actual aws cli path
  if ! aws_path="$(which aws)"; then
    (>&2 echo "AWS error: failed to locate aws cli executable")
    return 1
  fi
  # First, check if the role ARN environment variable is
  # configured. If it is not, just pass through.
  if [ "${AWS_ASSUME_ROLE_ARN}" = "" ]; then
    "${aws_path}" "${@}"
    return $?
  fi
  # Check if a role has already been assumed. If it
  # has, validate the credentials have not timed out
  # and pass through.
  if [ "${AWS_SESSION_TOKEN}" != "" ]; then
    # Cut off part of the expiration so we don't end up hitting
    # the expiration just as we make our call
    expires_at=$(date -d "${AWS_SESSION_EXPIRATION} - 20 sec" "+%s")
    if (( "${expires_at}" > $(date +%s) )); then
      "${aws_path}" "${@}"
      return $?
    fi
    # If we are here then the credentials were not
    # valid so clear the session token and restore
    # original credentials
    unset AWS_SESSION_TOKEN
    unset AWS_SESSION_EXPIRATION
    export AWS_ACCESS_KEY_ID="${CORE_AWS_ACCESS_KEY_ID}"
    export AWS_SECRET_ACCESS_KEY="${CORE_AWS_SECRET_ACCESS_KEY}"
  fi

  # Now lets assume the role
  if aws_output="$("${aws_path}" sts assume-role --role-arn "${AWS_ASSUME_ROLE_ARN}" --role-session-name "VagrantCI@${repo_name}-${job_id}")"; then
    # Stash the original credentials so they can be restored on expiry
    export CORE_AWS_ACCESS_KEY_ID="${AWS_ACCESS_KEY_ID}"
    export CORE_AWS_SECRET_ACCESS_KEY="${AWS_SECRET_ACCESS_KEY}"
    id="$(printf '%s' "${aws_output}" | jq -r .Credentials.AccessKeyId)" || failed=1
    key="$(printf '%s' "${aws_output}" | jq -r .Credentials.SecretAccessKey)" || failed=1
    token="$(printf '%s' "${aws_output}" | jq -r .Credentials.SessionToken)" || failed=1
    expire="$(printf '%s' "${aws_output}" | jq -r .Credentials.Expiration)" || failed=1
    if [ "${failed}" = "1" ]; then
      (>&2 echo "Failed to extract assume role credentials")
      return 1
    fi
    unset aws_output
    export AWS_ACCESS_KEY_ID="${id}"
    export AWS_SECRET_ACCESS_KEY="${key}"
    export AWS_SESSION_TOKEN="${token}"
    export AWS_SESSION_EXPIRATION="${expire}"
  else
    (>&2 echo "AWS assume role error: ${aws_output}")
    return 1
  fi

  # And we can execute!
  "${aws_path}" "${@}"
}

# Path to file used for output redirect
# and extracting messages for warning and
# failure information sent to slack
#
# $1: optional "clean" to remove the current file and start fresh
function output_file() {
  if [ "${1}" = "clean" ] && [ -f "${ci_output_file_path}" ]; then
    rm -f "${ci_output_file_path}"
    unset ci_output_file_path
  fi
  # Lazily create the temp file on first use (or after clean)
  if [ -z "${ci_output_file_path}" ] || [ ! -f "${ci_output_file_path}" ]; then
    ci_output_file_path="$(mktemp)"
  fi
  printf "%s" "${ci_output_file_path}"
}

# Write failure message, send error to configured
# slack, and exit with non-zero status. If an
# "$(output_file)" file exists, the last 5 lines will be
# included in the slack message.
# # $1: Failure message function failure() { local msg_template="${1}" local i=$(( ${#} - 1 )) local msg_args=("${@:2:$i}") # Update template to include caller information if in DEBUG mode if [ -n "${DEBUG}" ]; then msg_template=$(printf "<%s(%s:%d)> %s" "${FUNCNAME[1]}" "${BASH_SOURCE[1]}" "${BASH_LINENO[0]}" "${msg_template}") fi #shellcheck disable=SC2059 msg="$(printf "${msg_template}" "${msg_args[@]}")" if [ -n "${DEBUG_WITH_BATS}" ]; then printf "%s %b%s%b\n" "${ERROR_ICON}" "${TEXT_RED}" "${msg}" "${TEXT_CLEAR}" >&3 else printf "%s %b%s%b\n" "${ERROR_ICON}" "${TEXT_RED}" "${msg}" "${TEXT_CLEAR}" >&2 fi if [ -n "${SLACK_WEBHOOK}" ]; then if [ -f "$(output_file)" ]; then slack -s error -m "ERROR: ${msg}" -f "$(output_file)" -T 5 else slack -s error -m "ERROR: ${msg}" fi fi exit 1 } # Write warning message, send warning to configured # slack # # $1: Warning message function warn() { local msg_template="${1}" local i=$(( ${#} - 1 )) local msg_args=("${@:2:$i}") #shellcheck disable=SC2059 msg="$(printf "${msg_template}" "${msg_args[@]}")" printf "%s %b%s%b\n" "${WARNING_ICON}" "${TEXT_YELLOW}" "${msg}" "${TEXT_CLEAR}" >&2 if [ -n "${SLACK_WEBHOOK}" ]; then if [ -f "$(output_file)" ]; then slack -s warn -m "WARNING: ${msg}" -f "$(output_file)" else slack -s warn -m "WARNING: ${msg}" fi fi } # Write an informational message function info() { local msg_template="${1}\n" local i=$(( ${#} - 1 )) local msg_args=("${@:2:$i}") #shellcheck disable=SC2059 printf "${msg_template}" "${msg_args[@]}" >&2 } # Execute command while redirecting all output to # a file (file is used within fail mesage on when # command is unsuccessful). Final argument is the # error message used when the command fails. # # $@{1:$#-1}: Command to execute # $@{$#}: Failure message function wrap() { local i=$((${#} - 1)) if ! wrap_raw "${@:1:$i}"; then cat "$(output_file)" failure "${@:$#}" fi rm "$(output_file)" } # Execute command while redirecting all output to # a file. Exit status is returned. 
function wrap_raw() { output_file "clean" > /dev/null 2>&1 "${@}" > "$(output_file)" 2>&1 return $? } # Execute command while redirecting all output to # a file (file is used within fail mesage on when # command is unsuccessful). Command output will be # streamed during execution. Final argument is the # error message used when the command fails. # # $@{1:$#-1}: Command to execute # $@{$#}: Failure message function wrap_stream() { i=$((${#} - 1)) if ! wrap_stream_raw "${@:1:$i}"; then failure "${@:$#}" fi rm "$(output_file)" } # Execute command while redirecting all output # to a file. Command output will be streamed # during execution. Exit status is returned function wrap_stream_raw() { output_file "clean" "${@}" > "$(output_file)" 2>&1 & pid=$! until [ -f "$(output_file)" ]; do sleep 0.1 done tail -f --quiet --pid "${pid}" "$(output_file)" wait "${pid}" return $? } # Send command to packet device and wrap # execution # $@{1:$#-1}: Command to execute # $@{$#}: Failure message function pkt_wrap() { wrap packet-exec run -quiet -- "${@}" } # Send command to packet device and wrap # execution # $@: Command to execute function pkt_wrap_raw() { wrap_raw packet-exec run -quiet -- "${@}" } # Send command to packet device and wrap # execution with output streaming # $@{1:$#-1}: Command to execute # $@{$#}: Failure message function pkt_wrap_stream() { wrap_stream packet-exec run -quiet -- "${@}" } # Send command to packet device and wrap # execution with output streaming # $@: Command to execute function pkt_wrap_stream_raw() { wrap_stream_raw packet-exec run -quiet -- "${@}" } # Get the full path directory for a given # file path. File is not required to exist. # NOTE: Parent directories of given path will # be created. # # $1: file path function file_directory() { local path="${1?File path is required}" local dir if [[ "${path}" != *"/"* ]]; then dir="." else dir="${path%/*}" fi if [ ! 
-d "${dir}" ]; then mkdir -p "${dir}" || failure "Could not create directory (%s)" "${dir}" fi pushd "${dir}" dir="$(pwd)" || failure "Could not read directory path (%s)" "${dir}" popd printf "%s" "${dir}" } # Wait until the number of background jobs falls below # the maximum number provided. If the max number was reached # and waiting was performed until a process completed, the # string "waited" will be printed to stdout. # # NOTE: using `wait -n` would be cleaner but only became # available in bash as of 4.3 # # $1: maximum number of jobs function background_jobs_limit() { local max="${1}" if [ -z "${max}" ] || [[ "${max}" = *[!0123456789]* ]]; then failure "Maximum number of background jobs required" fi local debug_printed local jobs mapfile -t jobs <<< "$(jobs -p)" || failure "Could not read background job list" while [ "${#jobs[@]}" -ge "${max}" ]; do if [ -z "${debug_printed}" ]; then debug "max background jobs reached (%d), waiting for free process" "${max}" debug_printed="1" fi sleep 1 jobs=() local j_pids mapfile -t j_pids <<< "$(jobs -p)" || failure "Could not read background job list" for j in "${j_pids[@]}"; do if kill -0 "${j}" > /dev/null 2>&1; then jobs+=( "${j}" ) fi done done if [ -n "${debug_printed}" ]; then debug "background jobs count (%s) under max, continuing" "${#jobs[@]}" printf "waited" fi } # Reap a completed background process. If the process is # not complete, the process is ignored. The success/failure # returned from this function only applies to the process # identified by the provided PID _if_ the matching PID value # was written to stdout # # $1: PID function reap_completed_background_job() { local pid="${1}" if [ -z "${pid}" ]; then failure "PID of process to reap is required" fi if kill -0 "${pid}" > /dev/null 2>&1; then debug "requested pid to reap (%d) has not completed, ignoring" "${pid}" return 0 fi # The pid can be reaped so output the pid to indicate # any error is from the job printf "%s" "${pid}" if ! 
wait "${pid}"; then local code="${?}" debug "wait error code %d returned for pid %d" "${code}" "${pid}" return "${code}" fi return 0 } # Creates a cache and adds the provided items # # -d Optional description # -f Force cache (deletes cache if already exists) # # $1: name of cache # $2: artifact(s) to cache (path to artifact or directory containing artifacts) function create-cache() { local body local force local opt while getopts ":d:f" opt; do case "${opt}" in "d") body="${OPTARG}" ;; "f") force="1" ;; *) failure "Invalid flag provided" ;; esac done shift $((OPTIND-1)) cache_name="${1}" artifact_path="${2}" if [ -z "${cache_name}" ]; then failure "Cache name is required" fi if [ -z "${artifact_path}" ]; then failure "Artifact path is required" fi # Check for the cache if github_draft_release_exists "${repo_name}" "${cache_name}"; then # If forcing, delete the cache if [ -n "${force}" ]; then debug "cache '%s' found and force is set, removing" github_delete_draft_release "${cache_name}" else failure "Cache already exists (name: %s repo: %s)" "${cache_name}" "${repo_name}" fi fi # If no description is provided, then provide a default if [ -z "${body}" ]; then body="Cache name: %s\nCreate time: %s\nSource run: %s/%s/actions/runs/%s" \ "${cache_name}" "$(date)" "${GITHUB_SERVER_URL}" "${GITHUB_REPOSITORY}" "${GITHUB_RUN_ID}" fi # Make sure body is formatted if [ -n "${body}" ]; then body="$(printf "%b" "${body}")" fi response="$(github_create_release -o "${repo_owner}" -r "${repo_name}" -n "${cache_name}" -b "${body}")" || failure "Failed to create GitHub release" } # Retrieve items from cache # # -r Require cache to exist (failure if not found) # # $1: cache name # $2: destination directory function restore-cache() { local required while getopts ":r" opt; do case "${opt}" in "r") required="1" ;; *) failure "Invalid flag provided" ;; esac done shift $((OPTIND-1)) cache_name="${1}" destination="${2}" if [ -z "${cache_name}" ]; then failure "Cache name is required" fi 
if [ -z "${destination}" ]; then failure "Destination is required" fi # If required, check for the draft release and error if not found if [ -n "${required}" ]; then if ! github_draft_release_exists "${repo_name}" "${cache_name}"; then failure "Cache '%s' does not exist" "${cache_name}" fi fi mkdir -p "${destination}" || failure "Could not create destination directory (%s)" "${destination}" pushd "${destination}" github_draft_release_assets "${repo_name}" "${cache_name}" popd } # Submit given file to Apple's notarization service and # staple the notarization ticket. # # -i UUID: app store connect issuer ID (optional) # -j PATH: JSON file containing API key # -k ID: app store connect API key ID (optional) # -m SECS: maximum number of seconds to wait (optional, defaults to 600) # -o PATH: path to write notarized file (optional, will modify input by default) # # $1: file to notarize function notarize_file() { local creds_api_key_id local creds_api_key_path local creds_issuer_id local output_file local max_wait="600" local opt while getopts ":i:j:k:m:o:" opt; do case "${opt}" in "i") creds_api_key_id="${OPTARG}" ;; "j") creds_api_key_path="${OPTARG}" ;; "k") creds_issuer_id="${OPTARG}" ;; "m") max_wait="${OPTARG}" ;; "o") output_file="${OPTARG}" ;; *) failure "Invalid flag provided" ;; esac done shift $((OPTIND-1)) # Validate credentials were provided if [ -z "${creds_api_key_path}" ]; then failure "App store connect key path required for notarization" fi if [ ! 
-f "${creds_api_key_path}" ]; then failure "Invalid path provided for app store connect key path (%s)" "${creds_api_key_path}" fi # Collect auth related arguments local base_args=( "--api-key-path" "${creds_api_key_path}" ) if [ -n "${creds_api_key_id}" ]; then base_args+=( "--api-key" "${creds_api_key_id}" ) fi if [ -n "${creds_issuer_id}" ]; then base_args+=( "--api-issuer" "${creds_issuer_id}" ) fi local input_file="${1}" # Validate the input file if [ -z "${input_file}" ]; then failure "Input file is required for signing" fi if [ ! -f "${input_file}" ]; then failure "Cannot find input file (%s)" "${input_file}" fi # Check that rcodesign is available, and install # it if it is not if ! command -v rcodesign > /dev/null; then debug "rcodesign executable not found, installing..." install_github_tool "indygreg" "apple-platform-rs" "rcodesign" fi local notarize_file # If an output file path was defined, copy file # to output location before notarizing if [ -n "${output_file}" ]; then file_directory "${output_file}" # Remove file if it already exists rm -f "${output_file}" || failure "Could not modify output file (%s)" "${output_file}" cp -f "${input_file}" "${output_file}" || failure "Could not write to output file (%s)" "${output_file}" notarize_file="${output_file}" debug "notarizing file '%s' and writing to '%s'" "${input_file}" "${output_file}" else notarize_file="${input_file}" debug "notarizing file in place '%s'" "${input_file}" fi # Notarize the file local notarize_output if notarize_output="$(rcodesign \ notary-submit \ "${base_args[@]}" \ --max-wait-seconds "${max_wait}" \ --staple \ "${notarize_file}" 2>&1)"; then return 0 fi debug "notarization output: %s" "${notarize_output}" # Still here means notarization failure. 
Pull # the logs from the service before failing local submission_id="${notarize_output##*submission ID: }" submission_id="${submission_id%%$'\n'*}" rcodesign \ notary-log \ "${base_args[@]}" \ "${submission_id}" failure "Failed to notarize file (%s)" "${input_file}" } # Sign a file using signore. Will automatically apply # modified retry settings when larger files are submitted. # # -b NAME: binary identifier (macOS only) # -e PATH: path to entitlements file (macOS only) # -o PATH: path to write signed file (optional, will overwrite input by default) # $1: file to sign # # NOTE: If signore is not installed, a HASHIBOT_TOKEN is # required for downloading the signore release. The # token can also be set in SIGNORE_GITHUB_TOKEN if # the HASHIBOT_TOKEN is already set # # NOTE: SIGNORE_CLIENT_ID, SIGNORE_CLIENT_SECRET, and SIGNORE_SIGNER # environment variables must be set prior to calling this function function sign_file() { # Set 50M to be a largish file local largish_file_size="52428800" # Signore environment variables are required. Check # that they are set. if [ -z "${SIGNORE_CLIENT_ID}" ]; then failure "Cannot sign file, SIGNORE_CLIENT_ID is not set" fi if [ -z "${SIGNORE_CLIENT_SECRET}" ]; then failure "Cannot sign file, SIGNORE_CLIENT_SECRET is not set" fi if [ -z "${SIGNORE_SIGNER}" ]; then failure "Cannot sign file, SIGNORE_SIGNER is not set" fi local binary_identifier="" local entitlements="" local output_file="" local opt while getopts ":b:e:o:" opt; do case "${opt}" in "b") binary_identifier="${OPTARG}" ;; "e") entitlements="${OPTARG}" ;; "o") output_file="${OPTARG}" ;; *) failure "Invalid flag provided" ;; esac done shift $((OPTIND-1)) local input_file="${1}" # Check that a good input file was given if [ -z "${input_file}" ]; then failure "Input file is required for signing" fi if [ ! 
-f "${input_file}" ]; then failure "Cannot find input file (%s)" "${input_file}" fi # If the output file is not set it's a replacement if [ -z "${output_file}" ]; then debug "output file is unset, will replace input file (%s)" "${input_file}" output_file="${input_file}" fi # This will ensure parent directories exist file_directory "${output_file}" > /dev/null # If signore command is not installed, install it if ! command -v "signore" > /dev/null; then local hashibot_token_backup="${HASHIBOT_TOKEN}" # If the signore github token is set, apply it if [ -n "${SIGNORE_GITHUB_TOKEN}" ]; then HASHIBOT_TOKEN="${SIGNORE_GITHUB_TOKEN}" fi install_hashicorp_tool "signore" # Restore the hashibot token if it was modified HASHIBOT_TOKEN="${hashibot_token_backup}" fi # Define base set of arguments local signore_args=( "sign" "--file" "${input_file}" "--out" "${output_file}" "--match-file-mode" ) # Check the size of the file to be signed. If it's relatively # large, push up the max retries and lengthen the retry interval # NOTE: Only checked if `wc` is available local file_size="0" if command -v wc > /dev/null; then file_size="$(wc -c <"${input_file}")" || failure "Could not determine input file size" fi if [ "${file_size}" -gt "${largish_file_size}" ]; then debug "largish file detected, adjusting retry settings" signore_args+=( "--max-retries" "30" "--retry-interval" "10s" ) fi # If a binary identifier was provided then it's a macos signing if [ -n "${binary_identifier}" ]; then # shellcheck disable=SC2016 template='{type: "macos", input_format: "EXECUTABLE", binary_identifier: $identifier}' payload="$(jq -n --arg identifier "${binary_identifier}" "${template}")" || failure "Could not create signore payload for macOS signing" signore_args+=( "--signer-options" "${payload}" ) fi # If an entitlement was provided, validate the path # and add it to the args if [ -n "${entitlements}" ]; then if [ ! 
-f "${entitlements}" ]; then failure "Invalid path for entitlements provided (%s)" "${entitlements}" fi signore_args+=( "--entitlements" "${entitlements}" ) fi debug "signing file '%s' with arguments - %s" "${input_file}" "${signore_args[*]}" signore "${signore_args[@]}" || failure "Failed to sign file '%s'" "${input_file}" info "successfully signed file (%s)" "${input_file}" } # Create a GPG signature. This uses signore to generate a # gpg signature for a given file. If the destination # path for the signature is not provided, it will # be stored at the origin path with a .sig suffix # # $1: Path to origin file # $2: Path to store signature (optional) function gpg_sign_file() { # Check that we have something to sign if [ -z "${1}" ]; then failure "Origin file is required for signing" fi if [ ! -f "${1}" ]; then failure "Origin file does not exist (${1})" fi # Validate environment has required signore variables set if [ -z "${SIGNORE_CLIENT_ID}" ]; then failure "Cannot sign file, SIGNORE_CLIENT_ID is not set" fi if [ -z "${SIGNORE_CLIENT_SECRET}" ]; then failure "Cannot sign file, SIGNORE_CLIENT_SECRET is not set" fi if [ -z "${SIGNORE_SIGNER}" ]; then failure "Cannot sign file, SIGNORE_SIGNER is not set" fi local origin="${1}" local destination="${2}" if [ -z "${destination}" ]; then destination="${origin}.sig" debug "destination automatically set (%s)" "${destination}" fi if ! command -v signore; then debug "installing signore tool" install_hashicorp_tool "signore" fi if [ -e "${destination}" ]; then failure "File already exists at signature destination path (${destination})" fi wrap_stream signore sign --dearmor --file "${origin}" --out "${destination}" \ "Failed to sign file" } # Validate arguments for GitHub release. Checks for # two arguments and that second argument is an exiting # file asset, or directory. 
# # $1: GitHub tag name # $2: Asset file or directory of assets function release_validate() { if [ "${1}" = "" ]; then failure "Missing required position 1 argument (TAG) for release" fi if [ "${2}" = "" ]; then failure "Missing required position 2 argument (PATH) for release" fi if [ ! -e "${2}" ]; then failure "Path provided for release (${2}) does not exist" fi } # Generate a GitHub release # # $1: GitHub tag name # $2: Asset file or directory of assets function release() { release_validate "${@}" local tag_name="${1}" local assets="${2}" local body if [ -z "${body}" ]; then body="$(release_details "${tag_name}")" fi response="$(github_create_release -o "${repo_owner}" -r "${repo_name}" -t "${tag_name}" -n "${tag_name}" -b "${body}")" || failure "Failed to create GitHub release" local release_id release_id="$(printf "%s" "${response}" | jq -r '.id')" || failure "Failed to extract release ID from response for %s on %s" "${tag_name}" "${repository}" github_upload_release_artifacts "${repo_name}" "${release_id}" "${assets}" } # Generate a GitHub prerelease # # $1: GitHub tag name # $2: Asset file or directory of assets function prerelease() { release_validate "${@}" local ptag if [[ "${1}" != *"+"* ]]; then ptag="${1}+${short_sha}" else ptag="${1}" fi local assets="${2}" response="$(github_create_release -o "${repo_owner}" -r "${repo_name}" -t "${ptag}" -n "${ptag}" -b "${body}" -p -m)" || failure "Failed to create GitHub prerelease" local release_id release_id="$(printf "%s" "${response}" | jq -r '.id')" || failure "Failed to extract prerelease ID from response for %s on %s" "${tag_name}" "${repository}" github_upload_release_artifacts "${repo_name}" "${release_id}" "${assets}" printf "New prerelease published to %s @ %s\n" "${repo_name}" "${ptag}" >&2 printf "%s" "${ptag}" } # Generate a GitHub draft release # # $1: GitHub release name # $2: Asset file or directory of assets function draft_release() { local ptag="${1}" local assets="${2}" 
response="$(github_create_release -o "${repo_owner}" -r "${repo_name}" -t "${ptag}" -n "${ptag}" -b "${body}" -d)" || failure "Failed to create GitHub draft release" local release_id release_id="$(printf "%s" "${response}" | jq -r '.id')" || failure "Failed to extract draft release ID from response for %s on %s" "${tag_name}" "${repository}" github_upload_release_artifacts "${repo_name}" "${release_id}" "${assets}" printf "%s" "${ptag}" } # Generate details of the release. This will consist # of a link to the changelog if we can properly detect # it based on current location. # # $1: Tag name # # Returns: details content function release_details() { local tag_name="${1}" local proj_root if ! proj_root="$(git rev-parse --show-toplevel)"; then return fi if [ -z "$(git tag -l "${tag_name}")" ] || [ ! -f "${proj_root}/CHANGELOG.md" ]; then return fi printf "CHANGELOG:\n\nhttps://github.com/%s/blob/%s/CHANGELOG.md" "${repository}" "${tag_name}" } # Check if version string is valid for release # # $1: Version # Returns: 0 if valid, 1 if invalid function valid_release_version() { if [[ "${1}" =~ ^v?[0-9]+\.[0-9]+\.[0-9]+$ ]]; then return 0 else return 1 fi } # Validate arguments for HashiCorp release. Ensures asset # directory exists, and checks that the SHASUMS and SHASUM.sig # files are present. # # $1: Asset directory function hashicorp_release_validate() { local directory="${1}" local sums local sigs # Directory checks debug "checking asset directory was provided" if [ -z "${directory}" ]; then failure "No asset directory was provided for HashiCorp release" fi debug "checking that asset directory exists" if [ ! 
-d "${directory}" ]; then failure "Asset directory for HashiCorp release does not exist (${directory})" fi # SHASUMS checks debug "checking for shasums file" sums=("${directory}/"*SHA256SUMS) if [ ${#sums[@]} -lt 1 ]; then failure "Asset directory is missing SHASUMS file" fi debug "checking for shasums signature file" sigs=("${directory}/"*SHA256SUMS.sig) if [ ${#sigs[@]} -lt 1 ]; then failure "Asset directory is missing SHASUMS signature file" fi } # Verify release assets by validating checksum properly match # and that signature file is valid # # $1: Asset directory function hashicorp_release_verify() { if [ -z "${HASHICORP_PUBLIC_GPG_KEY_ID}" ]; then failure "Cannot verify release without GPG key ID. Set HASHICORP_PUBLIC_GPG_KEY_ID." fi local directory="${1}" local gpghome pushd "${directory}" # First do a checksum validation debug "validating shasums are correct" wrap shasum -a 256 -c ./*_SHA256SUMS \ "Checksum validation of release assets failed" # Next check that the signature is valid gpghome=$(mktemp -qd) export GNUPGHOME="${gpghome}" debug "verifying shasums signature file using key: %s" "${HASHICORP_PUBLIC_GPG_KEY_ID}" wrap gpg --keyserver keyserver.ubuntu.com --recv "${HASHICORP_PUBLIC_GPG_KEY_ID}" \ "Failed to import HashiCorp public GPG key" wrap gpg --verify ./*SHA256SUMS.sig ./*SHA256SUMS \ "Validation of SHA256SUMS signature failed" rm -rf "${gpghome}" popd } # Generate releases-api metadata # # $1: Product Version # $2: Asset directory function hashicorp_release_generate_release_metadata() { local version="${1}" local directory="${2}" if ! command -v bob; then debug "bob executable not found, installing" install_hashicorp_tool "bob" fi local hc_releases_input_metadata="input-meta.json" # The '-metadata-file' flag expects valid json. Contents are not used for Vagrant. 
echo "{}" > "${hc_releases_input_metadata}" debug "generating release metadata information" wrap_stream bob generate-release-metadata \ -metadata-file "${hc_releases_input_metadata}" \ -in-dir "${directory}" \ -version "${version}" \ -out-file "${hc_releases_metadata_filename}" \ "Failed to generate release metadata" rm -f "${hc_releases_input_metadata}" } # Upload release metadata and assets to the staging api # # $1: Product Name (e.g. "vagrant") # $2: Product Version # $3: Asset directory function hashicorp_release_upload_to_staging() { local product="${1}" local version="${2}" local directory="${3}" if ! command -v "hc-releases"; then debug "releases-api executable not found, installing" install_hashicorp_tool "releases-api" fi if [ -z "${HC_RELEASES_STAGING_HOST}" ]; then failure "Missing required environment variable HC_RELEASES_STAGING_HOST" fi if [ -z "${HC_RELEASES_STAGING_KEY}" ]; then failure "Missing required environment variable HC_RELEASES_STAGING_KEY" fi export HC_RELEASES_HOST="${HC_RELEASES_STAGING_HOST}" export HC_RELEASES_KEY="${HC_RELEASES_STAGING_KEY}" pushd "${directory}" # Create -file parameter list for hc-releases upload local fileParams=() for file in *; do fileParams+=("-file=${file}") done debug "uploading release assets to staging" wrap_stream hc-releases upload \ -product "${product}" \ -version "${version}" \ "${fileParams[@]}" \ "Failed to upload HashiCorp release assets" popd debug "creating release metadata" wrap_stream hc-releases metadata create \ -product "${product}" \ -input "${hc_releases_metadata_filename}" \ "Failed to create metadata for HashiCorp release" unset HC_RELEASES_HOST unset HC_RELEASES_KEY } # Promote release from staging to production # # $1: Product Name (e.g. "vagrant") # $2: Product Version function hashicorp_release_promote_to_production() { local product="${1}" local version="${2}" if ! 
command -v "hc-releases"; then debug "releases-api executable not found, installing" install_hashicorp_tool "releases-api" fi if [ -z "${HC_RELEASES_PROD_HOST}" ]; then failure "Missing required environment variable HC_RELEASES_PROD_HOST" fi if [ -z "${HC_RELEASES_PROD_KEY}" ]; then failure "Missing required environment variable HC_RELEASES_PROD_KEY" fi if [ -z "${HC_RELEASES_STAGING_KEY}" ]; then failure "Missing required environment variable HC_RELEASES_STAGING_KEY" fi export HC_RELEASES_HOST="${HC_RELEASES_PROD_HOST}" export HC_RELEASES_KEY="${HC_RELEASES_PROD_KEY}" export HC_RELEASES_SOURCE_ENV_KEY="${HC_RELEASES_STAGING_KEY}" debug "promoting release to production" wrap_stream hc-releases promote \ -product "${product}" \ -version "${version}" \ -source-env staging \ "Failed to promote HashiCorp release to Production" unset HC_RELEASES_HOST unset HC_RELEASES_KEY unset HC_RELEASES_SOURCE_ENV_KEY } # Send the post-publish sns message # # $1: Product name (e.g. "vagrant") defaults to $repo_name # $2: AWS Region of SNS (defaults to us-east-1) function hashicorp_release_sns_publish() { local message local product="${1}" local region="${2}" if [ -z "${product}" ]; then product="${repo_name}" fi if [ -z "${region}" ]; then region="us-east-1" fi # Validate the creds properly assume role and function wrap aws_deprecated configure list \ "Failed to reconfigure AWS credentials for release notification" # Now send the release notification debug "sending release notification to package repository" message=$(jq --null-input --arg product "$product" '{"product": $product}') wrap_stream aws sns publish --region "${region}" --topic-arn "${HC_RELEASES_PROD_SNS_TOPIC}" --message "${message}" \ "Failed to send SNS message for package repository update" return 0 } # Check if a release for the given version # has been published to the HashiCorp # releases site. 
#
# $1: Product Name
# $2: Product Version
#
# Returns 0 when the release directory exists on releases.hashicorp.com,
# 1 otherwise.
function hashicorp_release_exists() {
  local product="${1}"
  local version="${2}"

  # A HEAD request against the release directory is enough to detect
  # an existing publication
  if curl --silent --fail --head "https://releases.hashicorp.com/${product}/${product}_${version}/" > /dev/null ; then
    debug "hashicorp release of %s@%s found" "${product}" "${version}"
    return 0
  fi
  debug "hashicorp release of %s@%s not found" "${product}" "${version}"
  return 1
}

# Generate the SHA256SUMS file for assets
# in a given directory.
#
# $1: Asset Directory
# $2: Product Name
# $3: Product Version
#
# Writes <product>_<version>_SHA256SUMS inside the asset directory.
function generate_shasums() {
  local directory="${1}"
  local product="${2}"
  local version="${3}"

  pushd "${directory}"

  local shacontent
  debug "generating shasums file for %s@%s" "${product}" "${version}"
  shacontent="$(shasum -a256 ./*)" ||
    failure "Failed to generate shasums in ${directory}"
  # Strip the ./ prefix from each entry so the sums file references
  # bare file names
  sed 's/\.\///g' <( printf "%s" "${shacontent}" ) > "${product}_${version}_SHA256SUMS" ||
    failure "Failed to write shasums file"

  popd
}

# Generate a HashiCorp releases-api compatible release
#
# $1: Asset directory
# $2: Product Name (e.g. "vagrant")
# $3: Product Version
function hashicorp_release() {
  local directory="${1}"
  local product="${2}"
  local version="${3}"

  # If the version is not provided, use the discovered release version
  if [[ "${version}" == "" ]]; then
    version="${release_version}"
  fi

  debug "creating hashicorp release - product: %s version: %s assets: %s" "${product}" "${version}" "${directory}"

  if !
hashicorp_release_exists "${product}" "${version}"; then # Jump into our artifact directory pushd "${directory}" # If any sig files happen to have been included in here, # just remove them as they won't be using the correct # signing key rm -f ./*.sig # Generate our shasums file debug "generating shasums file for %s@%s" "${product}" "${version}" generate_shasums ./ "${product}" "${version}" # Grab the shasums file and sign it local shasum_files=(./*SHA256SUMS) local shasum_file="${shasum_files[0]}" # Remove relative prefix if found shasum_file="${shasum_file##*/}" debug "signing shasums file for %s@%s" "${product}" "${version}" gpg_sign_file "${shasum_file[0]}" # Jump back out of our artifact directory popd # Run validation and verification on release assets before # we actually do the release. debug "running release validation for %s@%s" "${product}" "${version}" hashicorp_release_validate "${directory}" debug "running release verification for %s@%s" "${product}" "${version}" hashicorp_release_verify "${directory}" # Now that the assets have been validated and verified, # peform the release setps debug "generating release metadata for %s@%s" "${product}" "${version}" hashicorp_release_generate_release_metadata "${version}" "${directory}" debug "uploading release artifacts to staging for %s@%s" "${product}" "${version}" hashicorp_release_upload_to_staging "${product}" "${version}" "${directory}" debug "promoting release to production for %s@%s" "${product}" "${version}" hashicorp_release_promote_to_production "${product}" "${version}" printf "HashiCorp release created (%s@%s)\n" "${product}" "${version}" else printf "hashicorp release not published, already exists (%s@%s)\n" "${product}" "${version}" fi # Send a notification to update the package repositories # with the new release. 
debug "sending packaging notification for %s@%s" "${product}" "${version}" hashicorp_release_sns_publish "${product}" } # Check if gem version is already published to RubyGems # # $1: Name of RubyGem # $2: Verision of RubyGem # $3: Custom gem server to search (optional) function is_version_on_rubygems() { local name="${1}" local version="${2}" local gemstore="${3}" if [ -z "${name}" ]; then failure "Name is required for version check on %s" "${gemstore:-RubyGems.org}" fi if [ -z "${version}" ]; then failure "Version is required for version check on %s" "${gemstore:-RubyGems.org}" fi debug "checking rubygem %s at version %s is currently published" "${name}" "${version}" local cmd_args=("gem" "search") if [ -n "${gemstore}" ]; then debug "checking rubygem publication at custom source: %s" "${gemstore}" cmd_args+=("--clear-sources" "--source" "${gemstore}") fi cmd_args+=("--remote" "--exact" "--all") local result result="$("${cmd_args[@]}" "${name}")" || failure "Failed to retreive remote version list from RubyGems" local versions="${result##*\(}" local versions="${versions%%)*}" local oifs="${IFS}" IFS=', ' local r=1 for v in $versions; do if [ "${v}" = "${version}" ]; then r=0 debug "rubygem %s at version %s was found" "${name}" "${version}" break fi done IFS="${oifs}" return $r } # Check if gem version is already published to hashigems # # $1: Name of RubyGem # $2: Verision of RubyGem function is_version_on_hashigems() { is_version_on_rubygems "${1}" "${2}" "https://gems.hashicorp.com" } # Build and release project gem to RubyGems function publish_to_rubygems() { if [ -z "${RUBYGEMS_API_KEY}" ]; then failure "RUBYGEMS_API_KEY is required for publishing to RubyGems.org" fi local gem_file="${1}" if [ -z "${gem_file}" ]; then failure "RubyGem file is required for publishing to RubyGems.org" fi if [ ! 
-f "${gem_file}" ]; then failure "Path provided does not exist or is not a file (%s)" "${gem_file}" fi # NOTE: Newer versions of rubygems support setting the # api key via the GEM_HOST_API_KEY environment # variable. Config file is still used so that older # versions can be used for doing pushes. gem_config="$(mktemp -p ./)" || failure "Could not create gem configuration file" # NOTE: The `--` are required due to the double dash # start of the first argument printf -- "---\n:rubygems_api_key: %s\n" "${RUBYGEMS_API_KEY}" > "${gem_config}" gem push --config-file "${gem_config}" "${gem_file}" || failure "Failed to publish RubyGem at '%s' to RubyGems.org" "${gem_file}" rm -f "${gem_config}" } # Publish gem to the hashigems repository # # $1: Path to gem file to publish function publish_to_hashigems() { local path="${1}" if [ -z "${path}" ]; then failure "Path to built gem required for publishing to hashigems" fi debug "publishing '%s' to hashigems" "${path}" # Define all the variables we'll need local user_bin local reaper local invalid local invalid_id wrap_stream gem install --user-install --no-document reaper-man \ "Failed to install dependency for hashigem generation" user_bin="$(ruby -e 'puts Gem.user_dir')/bin" reaper="${user_bin}/reaper-man" debug "using reaper-man installation at: %s" "${reaper}" # Create a temporary directory to work from local tmpdir tmpdir="$(mktemp -d -p ./)" || failure "Failed to create working directory for hashigems publish" mkdir -p "${tmpdir}/hashigems/gems" || failure "Failed to create gems directory" wrap cp "${path}" "${tmpdir}/hashigems/gems" \ "Failed to copy gem to working directory" pushd "${tmpdir}" # Run quick test to ensure bucket is accessible wrap aws s3 ls "s3://${HASHIGEMS_METADATA_BUCKET}" \ "Failed to access hashigems asset bucket" # Grab our remote metadata. If the file doesn't exist, that is always an error. 
debug "fetching hashigems metadata file from %s" "${HASHIGEMS_METADATA_BUCKET}" wrap aws s3 cp "s3://${HASHIGEMS_METADATA_BUCKET}/vagrant-rubygems.list" ./ \ "Failed to retrieve hashigems metadata list" # Add the new gem to the metadata file debug "adding new gem to the metadata file" wrap_stream "${reaper}" package add -S rubygems -p vagrant-rubygems.list ./hashigems/gems/*.gem \ "Failed to add new gem to hashigems metadata list" # Generate the repository debug "generating the new hashigems repository content" wrap_stream "${reaper}" repo generate -p vagrant-rubygems.list -o hashigems -S rubygems \ "Failed to generate the hashigems repository" # Upload the updated repository pushd ./hashigems debug "uploading new hashigems repository content to %s" "${HASHIGEMS_PUBLIC_BUCKET}" wrap_stream aws s3 sync . "s3://${HASHIGEMS_PUBLIC_BUCKET}" \ "Failed to upload the hashigems repository" # Store the updated metadata popd debug "uploading updated hashigems metadata file to %s" "${HASHIGEMS_METADATA_BUCKET}" wrap_stream aws s3 cp vagrant-rubygems.list "s3://${HASHIGEMS_METADATA_BUCKET}/vagrant-rubygems.list" \ "Failed to upload the updated hashigems metadata file" # Invalidate cloudfront so the new content is available local invalid debug "invalidating hashigems cloudfront distribution (%s)" "${HASHIGEMS_CLOUDFRONT_ID}" invalid="$(aws cloudfront create-invalidation --distribution-id "${HASHIGEMS_CLOUDFRONT_ID}" --paths "/*")" || failure "Invalidation of hashigems CDN distribution failed" local invalid_id invalid_id="$(printf '%s' "${invalid}" | jq -r ".Invalidation.Id")" if [ -z "${invalid_id}" ]; then failure "Failed to determine the ID of the hashigems CDN invalidation request" fi debug "hashigems cloudfront distribution invalidation identifer - %s" "${invalid_id}" # Wait for the invalidation process to complete debug "starting wait for hashigems cloudfront distribution invalidation to complete (id: %s)" "${invalid_id}" wrap aws cloudfront wait invalidation-completed 
--distribution-id "${HASHIGEMS_CLOUDFRONT_ID}" --id "${invalid_id}" \ "Failure encountered while waiting for hashigems CDN invalidation request to complete (ID: ${invalid_id})" debug "hashigems cloudfront distribution invalidation complete (id: %s)" "${invalid_id}" # Clean up and we are done popd rm -rf "${tmpdir}" } # Configures git for hashibot usage function hashibot_git() { wrap git config user.name "${HASHIBOT_USERNAME}" \ "Failed to setup git for hashibot usage (username)" wrap git config user.email "${HASHIBOT_EMAIL}" \ "Failed to setup git for hashibot usage (email)" wrap git remote set-url origin "https://${HASHIBOT_USERNAME}:${HASHIBOT_TOKEN}@github.com/${repository}" \ "Failed to setup git for hashibot usage (remote)" } # Get the default branch name for the current repository function default_branch() { local s s="$(git symbolic-ref refs/remotes/origin/HEAD)" || failure "Failed to determine default branch (is working directory git repository?)" printf "%s" "${s##*origin/}" } # Send a notification to slack. 
All flag values can be set with
# environment variables using the upcased name prefixed with SLACK_,
# for example: --channel -> SLACK_CHANNEL
#
# -c --channel CHAN        Send to channel
# -u --username USER       Send as username
# -i --icon URL            User icon image
# -s --state STATE         Message state (success, warn, error, or color code)
# -m --message MESSAGE     Message to send
# -M --message-file PATH   Use file contents as message
# -f --file PATH           Send raw contents of file in message (displayed in code block)
# -t --title TITLE         Message title
# -T --tail NUMBER         Send last NUMBER lines of content from raw message file
# -w --webhook URL         Slack webhook
function slack() {
  # Convert any long names to short names so a single getopts
  # spec below can handle both forms
  for arg in "$@"; do
    shift
    case "${arg}" in
      "--channel") set -- "${@}" "-c" ;;
      "--username") set -- "${@}" "-u" ;;
      "--icon") set -- "${@}" "-i" ;;
      "--state") set -- "${@}" "-s" ;;
      "--message") set -- "${@}" "-m" ;;
      "--message-file") set -- "${@}" "-M" ;;
      "--file") set -- "${@}" "-f" ;;
      "--title") set -- "${@}" "-t" ;;
      "--tail") set -- "${@}" "-T" ;;
      "--webhook") set -- "${@}" "-w" ;;
      *) set -- "${@}" "${arg}" ;;
    esac
  done
  local OPTIND opt
  # Default all options to values provided by environment variables
  local channel="${SLACK_CHANNEL}"
  local username="${SLACK_USERNAME}"
  local icon="${SLACK_ICON}"
  local state="${SLACK_STATE}"
  local message="${SLACK_MESSAGE}"
  local message_file="${SLACK_MESSAGE_FILE}"
  local file="${SLACK_FILE}"
  local title="${SLACK_TITLE}"
  local tail="${SLACK_TAIL}"
  local webhook="${SLACK_WEBHOOK}"
  while getopts ":c:u:i:s:m:M:f:t:T:w:" opt; do
    case "${opt}" in
      "c") channel="${OPTARG}" ;;
      "u") username="${OPTARG}" ;;
      "i") icon="${OPTARG}" ;;
      "s") state="${OPTARG}" ;;
      "m") message="${OPTARG}" ;;
      "M") message_file="${OPTARG}" ;;
      "f") file="${OPTARG}" ;;
      "t") title="${OPTARG}" ;;
      "T") tail="${OPTARG}" ;;
      "w") webhook="${OPTARG}" ;;
      *) failure "Invalid flag provided to slack" ;;
    esac
  done
  shift $((OPTIND-1))

  # If we don't have a webhook provided, stop here
  if [ -z "${webhook}" ]; then
    (>&2 echo "ERROR: Cannot send Slack notification, webhook unset")
    return 1
  fi

  local footer footer_icon ts
  # If we are using GitHub actions, format the footer
  if [ -n "${GITHUB_ACTIONS}" ]; then
    if [ -z "${icon}" ]; then
      icon="https://ca.slack-edge.com/T024UT03C-WG8NDATGT-f82ae03b9fca-48"
    fi
    if [ -z "${username}" ]; then
      username="GitHub"
    fi
    footer_icon="https://ca.slack-edge.com/T024UT03C-WG8NDATGT-f82ae03b9fca-48"
    footer="Actions - "
  fi

  # If no state was provided, default to good state
  if [ -z "${state}" ]; then
    state="good"
  fi

  # Convert state aliases
  case "${state}" in
    "success" | "good")
      state="good";;
    "warn" | "warning")
      state="warning";;
    "error" | "danger")
      state="danger";;
  esac

  # If we have a message file, read it
  if [ -n "${message_file}" ]; then
    local message_file_content
    message_file_content="$(<"${message_file}")"
    if [ -z "${message}" ]; then
      message="${message_file_content}"
    else
      message="${message}\n\n${message_file_content}"
    fi
  fi

  # If we have a file to include, add it now. Files are
  # displayed as raw content, so be sure to wrap with
  # backticks
  if [ -n "${file}" ]; then
    local file_content
    # If tail is provided, then only include the last n number
    # of lines in the file
    if [ -n "${tail}" ]; then
      if ! file_content="$(tail -n "${tail}" "${file}")"; then
        file_content="UNEXPECTED ERROR: Failed to tail content in file ${file}"
      fi
    else
      file_content="$(<"${file}")"
    fi
    if [ -n "${file_content}" ]; then
      message="${message}\n\n\`\`\`\n${file_content}\n\`\`\`"
    fi
  fi

  # Build the attachment jq template incrementally, only including
  # optional fields that actually have values
  local attach attach_template payload payload_template ts
  ts="$(date '+%s')"
  # shellcheck disable=SC2016
  attach_template='{text: $msg, color: $state, mrkdwn_in: ["text"], ts: $time'
  if [ -n "${title}" ]; then
    # shellcheck disable=SC2016
    attach_template+=', title: $title'
  fi
  if [ -n "${footer}" ]; then
    # shellcheck disable=SC2016
    attach_template+=', footer: $footer'
  fi
  if [ -n "${footer_icon}" ]; then
    # shellcheck disable=SC2016
    attach_template+=', footer_icon: $footer_icon'
  fi
  attach_template+='}'
  attach=$(jq -n \
    --arg msg "$(printf "%b" "${message}")" \
    --arg title "${title}" \
    --arg state "${state}" \
    --arg time "${ts}" \
    --arg footer "${footer}" \
    --arg footer_icon "${footer_icon}" \
    "${attach_template}" \
  )
  # Same incremental approach for the top level payload
  # shellcheck disable=SC2016
  payload_template='{attachments: [$attachment]'
  if [ -n "${username}" ]; then
    # shellcheck disable=SC2016
    payload_template+=', username: $username'
  fi
  if [ -n "${channel}" ]; then
    # shellcheck disable=SC2016
    payload_template+=', channel: $channel'
  fi
  if [ -n "${icon}" ]; then
    # shellcheck disable=SC2016
    payload_template+=', icon_url: $icon'
  fi
  payload_template+='}'
  payload=$(jq -n \
    --argjson attachment "${attach}" \
    --arg username "${username}" \
    --arg channel "${channel}" \
    --arg icon "${icon}" \
    "${payload_template}" \
  )

  debug "sending slack message with payload: %s" "${payload}"
  # Failure to notify is reported but intentionally not fatal
  if ! curl -SsL --fail -X POST -H "Content-Type: application/json" -d "${payload}" "${webhook}"; then
    echo "ERROR: Failed to send slack notification" >&2
  fi
}

# Install internal HashiCorp tools. These tools are expected to
# be located in private (though not required) HashiCorp repositories.
# It will attempt to download the correct artifact for the current
# platform based on HashiCorp naming conventions. It expects that
# the name of the repository is the name of the tool.
#
# $1: Name of repository
#
# Installs the unpacked tool into ${ci_bin_dir}.
function install_hashicorp_tool() {
  local tool_name="${1}"
  local extensions=("zip" "tar.gz")
  local asset release_content tmp

  if [ -z "${tool_name}" ]; then
    failure "Repository name is required for hashicorp tool install"
  fi
  debug "installing hashicorp tool: %s" "${tool_name}"

  # Swap out repository to force correct github token. The API requests
  # below target hashicorp/<tool>, so use that value here (previously
  # this referenced an undefined ${release_repo} variable)
  local repository_bak="${repository}"
  repository="hashicorp/${tool_name}"

  tmp="$(mktemp -d --tmpdir vagrantci-XXXXXX)" ||
    failure "Failed to create temporary working directory"
  pushd "${tmp}"

  # Detect local platform and architecture for artifact matching
  local platform
  platform="$(uname -s)" || failure "Failed to get local platform name"
  platform="${platform,,}" # downcase the platform name
  local arches=()
  local arch
  arch="$(uname -m)" || failure "Failed to get local platform architecture"
  arches+=("${arch}")
  # If the architecture is listed as x86_64, add amd64 to the
  # arches collection. Hashicorp naming scheme is to use amd64 in
  # the file name, but isn't always followed
  if [ "${arch}" = "x86_64" ]; then
    arches+=("amd64")
  fi

  release_content=$(github_request -H "Content-Type: application/json" \
    "https://api.github.com/repos/hashicorp/${tool_name}/releases/latest") ||
    failure "Failed to request latest releases for hashicorp/${tool_name}"

  # Try each extension/architecture combination until an asset matches
  local exten
  for exten in "${extensions[@]}"; do
    for arch in "${arches[@]}"; do
      local suffix="${platform}_${arch}.${exten}"
      debug "checking for release artifact with suffix: %s" "${suffix}"
      asset=$(printf "%s" "${release_content}" | jq -r \
        '.assets[] | select(.name | contains("'"${suffix}"'")) | .url')
      if [ -n "${asset}" ]; then
        debug "release artifact found: %s" "${asset}"
        break
      fi
    done
    if [ -n "${asset}" ]; then
      break
    fi
  done

  if [ -z "${asset}" ]; then
    # Report the originally detected architecture (arch has been
    # clobbered by the loop above; was incorrectly ${arch[0]})
    failure "Failed to find release of hashicorp/${tool_name} for ${platform} ${arches[0]}"
  fi
  debug "tool artifact match found for install: %s" "${asset}"

  # Download and unpack the artifact. Fixed: the error message was
  # previously executed as a command instead of passed to failure.
  github_request -o "${tool_name}.${exten}" \
    -H "Accept: application/octet-stream" "${asset}" ||
    failure "Failed to download latest release for hashicorp/${tool_name}"
  if [ "${exten}" = "zip" ]; then
    wrap unzip "${tool_name}.${exten}" \
      "Failed to unpack latest release for hashicorp/${tool_name}"
  else
    wrap tar xf "${tool_name}.${exten}" \
      "Failed to unpack latest release for hashicorp/${tool_name}"
  fi
  rm -f "${tool_name}.${exten}"

  # Install all unpacked files into the CI bin directory
  local files=( ./* )
  wrap chmod 0755 ./* \
    "Failed to change mode on latest release for hashicorp/${tool_name}"
  wrap mv ./* "${ci_bin_dir}" \
    "Failed to install latest release for hashicorp/${tool_name}"
  debug "new files added to path: %s" "${files[*]}"

  popd
  rm -rf "${tmp}"

  repository="${repository_bak}" # restore the repository value
}

# Install tool from GitHub releases. It will fetch the latest release
# of the tool and install it. The proper release artifact will be matched
# by a "linux_amd64" string. This command is best effort and may not work.
#
# $1: Organization name
# $2: Repository name
# $3: Tool name (optional)
#
# Installs the matched artifact into ${ci_bin_dir}/<tool name>.
function install_github_tool() {
  local org_name="${1}"
  local r_name="${2}"
  local tool_name="${3}"
  if [ -z "${tool_name}" ]; then
    tool_name="${r_name}"
  fi
  local asset release_content tmp
  local artifact_list artifact basen
  tmp="$(mktemp -d --tmpdir vagrantci-XXXXXX)" ||
    failure "Failed to create temporary working directory"
  pushd "${tmp}"

  debug "installing github tool %s from %s/%s" "${tool_name}" "${org_name}" "${r_name}"

  release_content=$(github_request -H "Content-Type: application/json" \
    "https://api.github.com/repos/${org_name}/${r_name}/releases/latest") ||
    failure "Failed to request latest releases for ${org_name}/${r_name}"
  # Match a linux amd64/x86_64 asset, excluding checksum and signature files
  asset=$(printf "%s" "${release_content}" | jq -r \
    '.assets[] | select( ( (.name | contains("amd64")) or (.name | contains("x86_64")) or (.name | contains("x86-64")) ) and (.name | contains("linux")) and (.name | endswith("sha256") | not) and (.name | endswith("sig") | not)) | .url') ||
    failure "Failed to detect latest release for ${org_name}/${r_name}"
  artifact="${asset##*/}"
  # Fixed: error message was previously executed as a command instead
  # of being passed to failure
  github_request -o "${artifact}" -H "Accept: application/octet-stream" "${asset}" ||
    failure "Failed to download latest release for ${org_name}/${r_name}"

  # Unpack based on the artifact's file extension
  basen="${artifact##*.}"
  if [ "${basen}" = "zip" ]; then
    wrap unzip "${artifact}" \
      "Failed to unpack latest release for ${org_name}/${r_name}"
    rm -f "${artifact}"
  elif [ -n "${basen}" ]; then
    wrap tar xf "${artifact}" \
      "Failed to unpack latest release for ${org_name}/${r_name}"
    rm -f "${artifact}"
  fi
  artifact_list=(./*)
  # If the artifact only contained a directory, get
  # the contents of the directory
  if [ "${#artifact_list[@]}" -eq "1" ] && [ -d "${artifact_list[0]}" ]; then
    debug "unpacked artifact contained only directory, inspecting contents"
    artifact_list=( "${artifact_list[0]}/"* )
  fi

  # Classify each unpacked item. Fixed: the glob match is checked
  # before the generic existence fallback (previous ordering made the
  # glob branch unreachable since every listed item exists), and it
  # compares against the item basename rather than the ./-prefixed path.
  local tool_match tool_glob_match executable_match
  local item
  for item in "${artifact_list[@]}"; do
    if [ "${item##*/}" = "${tool_name}" ]; then
      debug "tool name match found: %s" "${item}"
      tool_match="${item}"
    elif [[ "${item##*/}" = "${tool_name}"* ]]; then
      debug "tool name glob match found: %s" "${item}"
      tool_glob_match="${item}"
    elif [ -e "${item}" ]; then
      debug "executable match found: %s" "${item}"
      executable_match="${item}"
    fi
  done

  # Install based on best match to worst match. Fixed: install error
  # messages are passed to failure rather than executed as commands,
  # and the final failure call supplies the tool name its format
  # string expects.
  if [ -n "${tool_match}" ]; then
    debug "installing %s from tool name match (%s)" "${tool_name}" "${tool_match}"
    mv -f "${tool_match}" "${ci_bin_dir}/${tool_name}" ||
      failure "Failed to install latest release of %s from %s/%s" "${tool_name}" "${org_name}" "${r_name}"
  elif [ -n "${tool_glob_match}" ]; then
    debug "installing %s from tool name glob match (%s)" "${tool_name}" "${tool_glob_match}"
    mv -f "${tool_glob_match}" "${ci_bin_dir}/${tool_name}" ||
      failure "Failed to install latest release of %s from %s/%s" "${tool_name}" "${org_name}" "${r_name}"
  elif [ -n "${executable_match}" ]; then
    debug "installing %s from executable file match (%s)" "${tool_name}" "${executable_match}"
    mv -f "${executable_match}" "${ci_bin_dir}/${tool_name}" ||
      failure "Failed to install latest release of %s from %s/%s" "${tool_name}" "${org_name}" "${r_name}"
  else
    failure "Failed to locate tool '%s' in latest release from %s/%s" "${tool_name}" "${org_name}" "${r_name}"
  fi

  popd
  rm -rf "${tmp}"
}

# Prepare host for packet use. It will validate the
# required environment variables are set, ensure
# packet-exec is installed, and setup the SSH key.
function packet-setup() {
  # First check that we have the environment variables
  if [ -z "${PACKET_EXEC_TOKEN}" ]; then
    failure "Cannot setup packet, missing token"
  fi
  if [ -z "${PACKET_EXEC_PROJECT_ID}" ]; then
    failure "Cannot setup packet, missing project"
  fi
  if [ -z "${PACKET_SSH_KEY_CONTENT}" ]; then
    failure "Cannot setup packet, missing ssh key"
  fi

  install_hashicorp_tool "packet-exec"

  # Write the ssh key to disk. Key content arrives base64 encoded;
  # the file mode is restricted before the key material is written.
  local content
  content="$(base64 --decode - <<< "${PACKET_SSH_KEY_CONTENT}")" ||
    failure "Cannot setup packet, failed to decode key"
  touch ./packet-key
  chmod 0600 ./packet-key
  printf "%s" "${content}" > ./packet-key
  local working_directory
  working_directory="$(pwd)" ||
    failure "Cannot setup packet, failed to determine working directory"
  # packet-exec reads the key path from this environment variable
  export PACKET_EXEC_SSH_KEY="${working_directory}/packet-key"
}

# Download artifact(s) from GitHub release. The artifact pattern is simply
# a substring that is matched against the artifact download URL. Artifact(s)
# will be downloaded to the working directory.
#
# $1: repository name
# $2: release tag name
# $3: artifact pattern (optional, all artifacts downloaded if omitted)
function github_release_assets() {
  local req_args
  req_args=()
  local release_repo="${1}"
  local release_name="${2}"
  local asset_pattern="${3}"

  # Swap out repository to force correct github token
  local repository_bak="${repository}"
  repository="${repo_owner}/${release_repo}"

  req_args+=("-H" "Accept: application/vnd.github+json")
  req_args+=("https://api.github.com/repos/${repository}/releases/tags/${release_name}")

  debug "fetching release asset list for release %s on %s" "${release_name}" "${repository}"
  local release_content
  release_content=$(github_request "${req_args[@]}") ||
    failure "Failed to request release (${release_name}) for ${repository}"

  # Optionally filter the asset list by a name substring
  local query=".assets[]"
  if [ -n "${asset_pattern}" ]; then
    debug "applying release asset list filter %s" "${asset_pattern}"
    query+="$(printf ' | select(.name | contains("%s"))' "${asset_pattern}")"
  fi
  local asset_list
  asset_list=$(printf "%s" "${release_content}" | jq -r "${query} | .url") ||
    failure "Failed to detect asset in release (${release_name}) for ${release_repo}"
  local name_list
  name_list=$(printf "%s" "${release_content}" | jq -r "${query} | .name") ||
    failure "Failed to detect asset in release (${release_name}) for ${release_repo}"

  req_args=()
  req_args+=("-H" "Accept: application/octet-stream")

  # Download each asset into the working directory under its release name
  local assets asset_names
  readarray -t assets < <(printf "%s" "${asset_list}")
  readarray -t asset_names < <(printf "%s" "${name_list}")
  local idx
  for ((idx=0; idx<"${#assets[@]}"; idx++ )); do
    local asset="${assets[$idx]}"
    local artifact="${asset_names[$idx]}"
    # Fixed: download error message was previously executed as a
    # command instead of being passed to failure
    github_request "${req_args[@]}" -o "${artifact}" "${asset}" ||
      failure "Failed to download asset (${artifact}) in release ${release_name} for ${repository}"
    # Fixed: status message was missing its trailing newline
    printf "downloaded release asset %s from release %s on %s\n" "${artifact}" "${release_name}" "${repository}"
  done

  repository="${repository_bak}" # restore the repository value
}

# Basic
helper to create a GitHub prerelease
#
# $1: repository name
# $2: tag name for release
# $3: path to artifact(s) - single file or directory
function github_prerelease() {
  local prerelease_repo="${1}"
  local tag_name="${2}"
  local artifacts="${3}"

  if [ -z "${prerelease_repo}" ]; then
    failure "Name of repository required for prerelease release"
  fi
  if [ -z "${tag_name}" ]; then
    failure "Name is required for prerelease release"
  fi
  if [ -z "${artifacts}" ]; then
    failure "Artifacts path is required for prerelease release"
  fi
  if [ ! -e "${artifacts}" ]; then
    failure "No artifacts found at provided path (${artifacts})"
  fi

  local prerelease_target="${repo_owner}/${prerelease_repo}"

  # Create the prerelease
  local response
  response="$(github_create_release -p -t "${tag_name}" -o "${repo_owner}" -r "${prerelease_repo}" )" ||
    failure "Failed to create prerelease on %s/%s" "${repo_owner}" "${prerelease_repo}"

  # Extract the release ID from the response
  local release_id
  release_id="$(printf "%s" "${response}" | jq -r '.id')" ||
    failure "Failed to extract prerelease ID from response for ${tag_name} on ${prerelease_target}"

  github_upload_release_artifacts "${prerelease_repo}" "${release_id}" "${artifacts}"
}

# Upload artifacts to a release
#
# $1: target repository name
# $2: release ID
# $3: path to artifact(s) - single file or directory
function github_upload_release_artifacts() {
  local target_repo_name="${1}"
  local release_id="${2}"
  local artifacts="${3}"

  if [ -z "${target_repo_name}" ]; then
    failure "Repository name required for release artifact upload"
  fi
  if [ -z "${release_id}" ]; then
    failure "Release ID require for release artifact upload"
  fi
  if [ -z "${artifacts}" ]; then
    failure "Artifacts required for release artifact upload"
  fi
  if [ !
-e "${artifacts}" ]; then failure "No artifacts found at provided path for release artifact upload (%s)" "${artifacts}" fi # Swap out repository to force correct github token local repository_bak="${repository}" repository="${repo_owner}/${target_repo_name}" local req_args=("-X" "POST" "-H" "Content-Type: application/octet-stream") # Now upload the artifacts to the draft release local artifact_name if [ -f "${artifacts}" ]; then debug "uploading %s to release ID %s on %s" "${artifact}" "${release_id}" "${repository}" artifact_name="${artifacts##*/}" req_args+=("https://uploads.github.com/repos/${repository}/releases/${release_id}/assets?name=${artifact_name}" "--data-binary" "@${artifacts}") if ! github_request "${req_args[@]}" > /dev/null ; then failure "Failed to upload artifact '${artifacts}' to draft release on ${repository}" fi printf "Uploaded release artifact: %s\n" "${artifact_name}" >&2 # Everything is done so get on outta here return 0 fi # Push into the directory pushd "${artifacts}" local artifact_path # Walk through each item and upload for artifact_path in * ; do if [ ! -f "${artifact_path}" ]; then debug "skipping '%s' as it is not a file" "${artifact_path}" continue fi artifact_name="${artifact_path##*/}" debug "uploading %s/%s to release ID %s on %s" "${artifacts}" "${artifact_name}" "${release_id}" "${repository}" local r_args=( "${req_args[@]}" ) r_args+=("https://uploads.github.com/repos/${repository}/releases/${release_id}/assets?name=${artifact_name}" "--data-binary" "@${artifact_path}") if ! 
github_request "${r_args[@]}" > /dev/null ; then failure "Failed to upload artifact '${artifact_name}' in '${artifacts}' to draft release on ${repository}" fi printf "Uploaded release artifact: %s\n" "${artifact_name}" >&2 done repository="${repository_bak}" } # Basic helper to create a GitHub draft release # # $1: repository name # $2: tag name for release # $3: path to artifact(s) - single file or directory function github_draft_release() { local draft_repo="${1}" local tag_name="${2}" local artifacts="${3}" if [ -z "${draft_repo}" ]; then failure "Name of repository required for draft release" fi if [ -z "${tag_name}" ]; then failure "Name is required for draft release" fi if [ -z "${artifacts}" ]; then failure "Artifacts path is required for draft release" fi if [ ! -e "${artifacts}" ]; then failure "No artifacts found at provided path (%s)" "${artifacts}" fi # Create the draft release local response response="$(github_create_release -d -t "${tag_name}" -o "${repo_owner}" -r "${draft_repo}" )" || failure "Failed to create draft release on %s" "${repo_owner}/${draft_repo}" # Extract the release ID from the response local release_id release_id="$(printf "%s" "${response}" | jq -r '.id')" || failure "Failed to extract draft release ID from response for %s on %s" "${tag_name}" "${repo_owner}/${draft_repo}" github_upload_release_artifacts "${draft_repo}" "${release_id}" "${artifacts}" } # Create a GitHub release # # -b BODY - body of release # -c COMMITISH - commitish of release # -n NAME - name of the release # -o OWNER - repository owner (required) # -r REPO - repository name (required) # -t TAG_NAME - tag name for release (required) # -d - draft release # -p - prerelease # -g - generate release notes # -m - make release latest # # NOTE: Artifacts for release must be uploaded using `github_upload_release_artifacts` function github_create_release() { local OPTIND opt owner repo tag_name # Values that can be null local body commitish name # Values we default local 
draft="false" local generate_notes="false" local make_latest="false" local prerelease="false" while getopts ":b:c:n:o:r:t:dpgm" opt; do case "${opt}" in "b") body="${OPTARG}" ;; "c") commitish="${OPTARG}" ;; "n") name="${OPTARG}" ;; "o") owner="${OPTARG}" ;; "r") repo="${OPTARG}" ;; "t") tag_name="${OPTARG}" ;; "d") draft="true" ;; "p") prerelease="true" ;; "g") generate_notes="true" ;; "m") make_latest="true" ;; *) failure "Invalid flag provided to github_create_release" ;; esac done shift $((OPTIND-1)) # Sanity check if [ -z "${owner}" ]; then failure "Repository owner value is required for GitHub release" fi if [ -z "${repo}" ]; then failure "Repository name is required for GitHub release" fi if [ -z "${tag_name}" ] && [ "${draft}" != "true" ]; then failure "Tag name is required for GitHub release" fi if [ "${draft}" = "true" ] && [ "${prerelease}" = "true" ]; then failure "Release cannot be both draft and prerelease" fi # If no name is provided, use the tag name value if [ -z "${name}" ]; then name="${tag_name}" fi # shellcheck disable=SC2016 local payload_template='{tag_name: $tag_name, draft: $draft, prerelease: $prerelease, generate_release_notes: $generate_notes, make_latest: $make_latest' local jq_args=("-n" "--arg" "tag_name" "${tag_name}" "--arg" "make_latest" "${make_latest}" "--argjson" "draft" "${draft}" "--argjson" "generate_notes" "${generate_notes}" "--argjson" "prerelease" "${prerelease}" ) if [ -n "${commitish}" ]; then # shellcheck disable=SC2016 payload_template+=', target_commitish: $commitish' jq_args+=("--arg" "commitish" "${commitish}") fi if [ -n "${name}" ]; then # shellcheck disable=SC2016 payload_template+=', name: $name' jq_args+=("--arg" "name" "${name}") fi if [ -n "${body}" ]; then # shellcheck disable=SC2016 payload_template+=', body: $body' jq_args+=("--arg" "body" "${body}") fi payload_template+='}' # Generate the payload local payload payload="$(jq "${jq_args[@]}" "${payload_template}" )" || failure "Could not generate GitHub 
release JSON payload" local target_repo="${owner}/${repo}" # Set repository to get correct token behavior on request local repository_bak="${repository}" repository="${target_repo}" # Craft our request arguments local req_args=("-X" "POST" "https://api.github.com/repos/${target_repo}/releases" "-d" "${payload}") # Create the draft release local response if ! response="$(github_request "${req_args[@]}")"; then failure "Could not create github release on ${target_repo}" fi # Restore the repository repository="${repository_bak}" local rel_type if [ "${draft}" = "true" ]; then rel_type="draft release" elif [ "${prerelease}" = "true" ]; then rel_type="prerelease" else rel_type="release" fi # Report new draft release was created printf "New %s '%s' created on '%s'\n" "${rel_type}" "${tag_name}" "${target_repo}" >&2 # Print the response printf "%s" "${response}" } # Check if a github release exists by tag name # NOTE: This can be used for release and prerelease checks. # Draft releases must use the github_draft_release_exists # function. 
#
# $1: repository name
# $2: release tag name
function github_release_exists() {
  local release_repo="${1}"
  local release_name="${2}"

  if [ -z "${release_repo}" ]; then
    failure "Repository name required for release lookup"
  fi
  if [ -z "${release_name}" ]; then
    failure "Release name required for release lookup"
  fi

  # Override repository value to get correct token automatically
  local repository_bak="${repository}"
  repository="${repo_owner}/${release_repo}"

  local result="1"
  if github_request \
    -H "Accept: application/vnd.github+json" \
    "https://api.github.com/repos/${repository}/releases/tags/${release_name}" > /dev/null; then
    debug "release '${release_name}' found in ${repository}"
    result="0"
  else
    debug "release '${release_name}' not found in ${repository}"
  fi

  # Restore repository value
  repository="${repository_bak}"
  return "${result}"
}

# Check if a github release exists using fuzzy match
#
# $1: repository name
# $2: release name
function github_release_exists_fuzzy() {
  local release_repo="${1}"
  local release_name="${2}"

  if [ -z "${release_repo}" ]; then
    failure "Repository name required for draft release lookup"
  fi
  if [ -z "${release_name}" ]; then
    failure "Release name required for draft release lookup"
  fi

  # Override repository value to get correct token automatically
  local repository_bak="${repository}"
  repository="${repo_owner}/${release_repo}"

  local page=$((1))
  local matched_name
  # FIX: the loop previously tested the never-assigned ${release_content}
  # (copied from github_draft_release_exists); loop until a match is found
  # or the paginated listing is exhausted
  while [ -z "${matched_name}" ]; do
    local release_list
    release_list="$(github_request \
      -H "Content-Type: application/json" \
      "https://api.github.com/repos/${repository}/releases?per_page=100&page=${page}")" ||
      failure "Failed to request releases list for ${repository}"

    # If there's no more results, just bust out of the loop
    if [ "$(jq 'length' <( printf "%s" "${release_list}" ))" -lt "1" ]; then
      break
    fi

    local name_list n
    name_list="$(printf "%s" "${release_list}" | jq -r '.[] | .name')" ||
      failure "Could not generate name list"
    # FIX: read names one-per-line instead of word-splitting an unquoted
    # expansion, which broke release names containing whitespace
    local names
    readarray -t names < <(printf "%s" "${name_list}")
    for n in "${names[@]}"; do
      # ${release_name} is treated as an (unanchored) regex pattern
      if [[ "${n}" =~ $release_name ]]; then
        matched_name="${n}"
        break
      fi
    done

    ((page++))
  done

  # Restore the $repository value
  repository="${repository_bak}"

  if [ -z "${matched_name}" ]; then
    debug "did not locate release named %s for %s" "${release_name}" "${repo_owner}/${release_repo}"
    return 1
  fi

  debug "found release name %s in %s (pattern: %s)" "${matched_name}" "${repo_owner}/${release_repo}" "${release_name}"
  return 0
}

# Check if a draft release exists by name
#
# $1: repository name
# $2: release name
function github_draft_release_exists() {
  local release_repo="${1}"
  local release_name="${2}"

  if [ -z "${release_repo}" ]; then
    failure "Repository name required for draft release lookup"
  fi
  if [ -z "${release_name}" ]; then
    failure "Release name required for draft release lookup"
  fi

  # Override repository value to get correct token automatically
  local repository_bak="${repository}"
  repository="${repo_owner}/${release_repo}"

  local page=$((1))
  local release_content query
  while [ -z "${release_content}" ]; do
    local release_list
    release_list="$(github_request \
      -H "Content-Type: application/json" \
      "https://api.github.com/repos/${repository}/releases?per_page=100&page=${page}")" ||
      failure "Failed to request releases list for ${repository}"

    # If there's no more results, just bust out of the loop
    if [ "$(jq 'length' <( printf "%s" "${release_list}" ))" -lt "1" ]; then
      break
    fi

    query="$(printf '.[] | select(.name == "%s")' "${release_name}")"
    release_content=$(printf "%s" "${release_list}" | jq -r "${query}")

    ((page++))
  done

  # Restore the $repository value
  repository="${repository_bak}"

  if [ -z "${release_content}" ]; then
    debug "did not locate draft release named %s for %s" "${release_name}" "${repo_owner}/${release_repo}"
    return 1
  fi

  debug "found draft release name %s in %s" "${release_name}" "${repo_owner}/${release_repo}"
  return 0
}

# Download artifact(s) from GitHub draft release.
# A draft release is not attached to a tag and therefore is referenced
# by the release name directly. The artifact pattern is simply a substring
# that is matched against the artifact download URL. Artifact(s) will be
# downloaded to the working directory.
#
# $1: repository name
# $2: release name
# $3: artifact pattern (optional, all artifacts downloaded if omitted)
function github_draft_release_assets() {
  local release_repo_name="${1}"
  local release_name="${2}"
  local asset_pattern="${3}"

  if [ -z "${release_repo_name}" ]; then
    failure "Repository name is required for draft release asset fetching"
  fi
  if [ -z "${release_name}" ]; then
    failure "Draft release name is required for draft release asset fetching"
  fi

  # Override repository value to get correct token automatically
  local repository_bak="${repository}"
  repository="${repo_owner}/${release_repo_name}"

  local page=$((1))
  local release_content query
  while [ -z "${release_content}" ]; do
    local release_list
    release_list=$(github_request -H "Content-Type: application/json" \
      "https://api.github.com/repos/${repository}/releases?per_page=100&page=${page}") ||
      failure "Failed to request releases list for ${repository}"

    # If there's no more results, just bust out of the loop
    if [ "$(jq 'length' <( printf "%s" "${release_list}" ))" -lt "1" ]; then
      debug "did not locate draft release named %s in %s" "${release_name}" "${repository}"
      break
    fi

    query="$(printf '.[] | select(.name == "%s")' "${release_name}")"
    release_content=$(printf "%s" "${release_list}" | jq -r "${query}")

    ((page++))
  done

  query=".assets[]"
  if [ -n "${asset_pattern}" ]; then
    debug "apply pattern filter to draft assets: %s" "${asset_pattern}"
    query+="$(printf ' | select(.name | contains("%s"))' "${asset_pattern}")"
  fi

  local asset_list
  asset_list=$(printf "%s" "${release_content}" | jq -r "${query} | .url") ||
    failure "Failed to detect asset in release (${release_name}) for ${repository}"
  local name_list
  name_list=$(printf "%s" "${release_content}" | jq -r "${query} | .name") ||
    failure "Failed to detect asset in release (${release_name}) for ${repository}"

  debug "draft release assets list: %s" "${name_list}"

  local assets asset_names
  readarray -t assets < <(printf "%s" "${asset_list}")
  readarray -t asset_names < <(printf "%s" "${name_list}")

  if [ "${#assets[@]}" -ne "${#asset_names[@]}" ]; then
    failure "Failed to match download assets with names in release list for ${repository}"
  fi

  local idx
  for ((idx=0; idx<"${#assets[@]}"; idx++ )); do
    local asset="${assets[$idx]}"
    local artifact="${asset_names[$idx]}"
    # FIX: the error path was missing the `failure` call and would
    # have attempted to execute the error message as a command
    github_request -o "${artifact}" \
      -H "Accept: application/octet-stream" "${asset}" ||
      failure "Failed to download asset in release (${release_name}) for ${repository} - ${artifact}"
    printf "downloaded draft release asset at %s\n" "${artifact}" >&2
  done

  # FIX: the restore assignment was reversed (repository_bak="${repository}"),
  # which left the override in place after return
  repository="${repository_bak}" # restore repository value
}

# This function is identical to the github_draft_release_assets
# function above with one caveat: it does not download the files.
# Each file that would be downloaded is simply touched in the
# current directory. This provides an easy way to check the
# files that would be downloaded without actually downloading
# them.
#
# An example usage of this can be seen in the vagrant package
# building where we use this to enable building missing substrates
# or packages on re-runs and only download the artifacts if
# actually needed.
function github_draft_release_asset_names() {
  local release_reponame="${1}"
  local release_name="${2}"
  local asset_pattern="${3}"

  if [ -z "${release_reponame}" ]; then
    failure "Repository name is required for draft release assets names"
  fi
  if [ -z "${release_name}" ]; then
    failure "Release name is required for draft release asset names"
  fi

  # Override repository value to get correct token automatically
  local repository_bak="${repository}"
  repository="${repo_owner}/${release_reponame}"

  local page=$((1))
  local release_content query
  while [ -z "${release_content}" ]; do
    local release_list
    # FIX: header flag was missing its leading dash ('H' instead of '-H'),
    # which made curl treat the header value as a URL argument
    release_list=$(github_request -H "Content-Type: application/json" \
      "https://api.github.com/repos/${repository}/releases?per_page=100&page=${page}") ||
      failure "Failed to request releases list for ${repository}"

    # If there's no more results, just bust out of the loop
    if [ "$(jq 'length' <( printf "%s" "${release_list}" ))" -lt "1" ]; then
      debug "did not locate draft release named %s in %s" "${release_name}" "${repository}"
      break
    fi

    query="$(printf '.[] | select(.name == "%s")' "${release_name}")"
    release_content=$(printf "%s" "${release_list}" | jq -r "${query}")

    ((page++))
  done

  query=".assets[]"
  if [ -n "${asset_pattern}" ]; then
    debug "apply pattern filter to draft assets: %s" "${asset_pattern}"
    query+="$(printf ' | select(.name | contains("%s"))' "${asset_pattern}")"
  fi

  local name_list
  name_list=$(printf "%s" "${release_content}" | jq -r "${query} | .name") ||
    failure "Failed to detect asset in release (${release_name}) for ${repository}"

  debug "draft release assets list: %s" "${name_list}"

  local asset_names
  readarray -t asset_names < <(printf "%s" "${name_list}")

  local idx
  for ((idx=0; idx<"${#asset_names[@]}"; idx++ )); do
    local artifact="${asset_names[$idx]}"
    touch "${artifact}" ||
      failure "Failed to touch release asset at path: %s" "${artifact}"
    printf "touched draft release asset at %s\n" "${artifact}" >&2
  done

  # FIX: the restore assignment was reversed (repository_bak="${repository}"),
  # which left the override in place after return
  repository="${repository_bak}" # restore repository value
}

# Delete
# a github release by tag name
# NOTE: Releases and prereleases can be deleted using this
# function. For draft releases use github_delete_draft_release
#
# $1: tag name of release
# $2: repository name (optional, defaults to current repository name)
function github_delete_release() {
  local release_name="${1}"
  local release_repo="${2:-$repo_name}"

  if [ -z "${release_name}" ]; then
    failure "Release name is required for deletion"
  fi
  if [ -z "${release_repo}" ]; then
    failure "Repository is required for release deletion"
  fi

  # Override repository value to get correct token automatically
  local repository_bak="${repository}"
  repository="${repo_owner}/${release_repo}"

  # Fetch the release first
  local release_content
  release_content="$(github_request \
    -H "Accept: application/vnd.github+json" \
    "https://api.github.com/repos/${repository}/releases/tags/${release_name}")" ||
    failure "Failed to fetch release information for '${release_name}' in ${repository}"

  # Get the release id to reference in delete request
  local rel_id
  rel_id="$(jq -r '.id' <( printf "%s" "${release_content}" ) )" ||
    failure "Failed to read release id for '${release_name}' in ${repository}"

  debug "deleting github release '${release_name}' in ${repository} with id ${rel_id}"

  # Send the deletion request
  github_request \
    -X "DELETE" \
    -H "Accept: application/vnd.github+json" \
    "https://api.github.com/repos/${repository}/releases/${rel_id}" > /dev/null ||
    failure "Failed to delete release '${release_name}' in ${repository}"

  # Restore repository value
  repository="${repository_bak}"
}

# Delete draft release with given name
#
# $1: name of draft release
# $2: repository name (optional, defaults to current repository name)
function github_delete_draft_release() {
  local draft_name="${1}"
  local delete_repo="${2:-$repo_name}"

  if [ -z "${draft_name}" ]; then
    failure "Draft name is required for deletion"
  fi
  if [ -z "${delete_repo}" ]; then
    failure "Repository is required for draft deletion"
  fi

  # Override repository value to get correct token automatically
  local repository_bak="${repository}"
  repository="${repo_owner}/${delete_repo}"

  local draft_ids=()
  local page=$((1))
  while true; do
    local release_list list_length
    release_list=$(github_request -H "Content-Type: application/json" \
      "https://api.github.com/repos/${repository}/releases?per_page=100&page=${page}") ||
      failure "Failed to request releases list for draft deletion on ${repository}"
    list_length="$(jq 'length' <( printf "%s" "${release_list}" ))" ||
      failure "Failed to calculate release length for draft deletion on ${repository}"

    # If the list is empty then there are no more releases to process
    if [ -z "${list_length}" ] || [ "${list_length}" -lt 1 ]; then
      debug "no releases returned for page %d in repository %s" "${page}" "${repository}"
      break
    fi

    local entry i release_draft release_id release_name
    for (( i=0; i < "${list_length}"; i++ )); do
      entry="$(jq ".[$i]" <( printf "%s" "${release_list}" ))" ||
        failure "Failed to read entry for draft deletion on ${repository}"
      release_draft="$(jq -r '.draft' <( printf "%s" "${entry}" ))" ||
        failure "Failed to read entry draft for draft deletion on ${repository}"
      release_id="$(jq -r '.id' <( printf "%s" "${entry}" ))" ||
        failure "Failed to read entry ID for draft deletion on ${repository}"
      release_name="$(jq -r '.name' <( printf "%s" "${entry}" ))" ||
        failure "Failed to read entry name for draft deletion on ${repository}"

      # If the names don't match, skip
      if [ "${release_name}" != "${draft_name}" ]; then
        debug "skipping release deletion, name mismatch (%s != %s)" "${release_name}" "${draft_name}"
        continue
      fi

      # If the release is not a draft, skip it (comment previously
      # said "fail" but the code has always just continued)
      if [ "${release_draft}" != "true" ]; then
        debug "skipping release '%s' (ID: %s) from '%s' - release is not a draft" "${draft_name}" "${release_id}" "${repository}"
        continue
      fi

      # If we are here, we found a match
      draft_ids+=( "${release_id}" )
    done
    ((page++))
  done

  # If no draft ids were found, the release was not found
  # so we can just return success
  if [ "${#draft_ids[@]}" -lt "1" ]; then
    debug "no draft releases found matching name %s in %s" "${draft_name}" "${repository}"
    repository="${repository_bak}" # restore repository value before return
    return 0
  fi

  # Still here? Okay! Delete the draft(s)
  local draft_id
  for draft_id in "${draft_ids[@]}"; do
    info "Deleting draft release %s from %s (ID: %d)\n" "${draft_name}" "${repository}" "${draft_id}"
    github_request -X DELETE "https://api.github.com/repos/${repository}/releases/${draft_id}" ||
      failure "Failed to prune draft release ${draft_name} from ${repository}"
  done

  repository="${repository_bak}" # restore repository value before return
}

# Delete prerelease with given name
#
# $1: tag name of prerelease
# $2: repository name (optional, defaults to current repository name)
function github_delete_prerelease() {
  local tag_name="${1}"
  local delete_repo="${2:-$repo_name}"

  if [ -z "${tag_name}" ]; then
    failure "Tag name is required for deletion"
  fi
  if [ -z "${delete_repo}" ]; then
    failure "Repository is required for prerelease deletion"
  fi

  # Override repository value to get correct token automatically
  local repository_bak="${repository}"
  repository="${repo_owner}/${delete_repo}"

  local prerelease
  prerelease=$(github_request -H "Content-Type: application/vnd.github+json" \
    "https://api.github.com/repos/${repository}/releases/tags/${tag_name}") ||
    failure "Failed to get prerelease %s from %s" "${tag_name}" "${repository}"

  local prerelease_id
  prerelease_id="$(jq -r '.id' <( printf "%s" "${prerelease}" ))" ||
    failure "Failed to read prerelease ID for %s on %s" "${tag_name}" "${repository}"
  local is_prerelease
  is_prerelease="$(jq -r '.prerelease' <( printf "%s" "${prerelease}" ))" ||
    failure "Failed to read prerelease status for %s on %s" "${tag_name}" "${repository}"

  # Validate the matched release is a prerelease
  if [ "${is_prerelease}" != "true" ]; then
    failure "Prerelease %s on %s is not marked as a prerelease, cannot delete" "${tag_name}" "${repository}"
  fi

  info "Deleting prerelease %s from repository %s" "${tag_name}" "${repository}"
  github_request -X DELETE "https://api.github.com/repos/${repository}/releases/${prerelease_id}" ||
    failure "Failed to delete prerelease %s from %s" "${tag_name}" "${repository}"

  repository="${repository_bak}" # restore repository value before return
}

# Delete any draft releases that are older than the
# given number of days
#
# $1: days
# $2: repository name (optional, defaults to current repository name)
function github_draft_release_prune() {
  github_release_prune "draft" "${@}"
}

# Delete any prereleases that are older than the
# given number of days
#
# $1: days
# $2: repository name (optional, defaults to current repository name)
function github_prerelease_prune() {
  github_release_prune "prerelease" "${@}"
}

# Delete any releases of provided type that are older than the
# given number of days
#
# $1: type (prerelease or draft)
# $2: days
# $3: repository name (optional, defaults to current repository name)
function github_release_prune() {
  local prune_type="${1}"
  if [ -z "${prune_type}" ]; then
    failure "Type is required for release pruning"
  fi
  if [ "${prune_type}" != "draft" ] && [ "${prune_type}" != "prerelease" ]; then
    failure "Invalid release pruning type provided '%s' (supported: draft or prerelease)" "${prune_type}"
  fi
  local days="${2}"
  if [ -z "${days}" ]; then
    failure "Number of days to retain is required for pruning"
  fi
  if [[ "${days}" = *[!0123456789]* ]]; then
    failure "Invalid value provided for days to retain when pruning (%s)" "${days}"
  fi
  local prune_repo="${3:-$repo_name}"
  if [ -z "${prune_repo}" ]; then
    failure "Repository name is required for pruning"
  fi

  # Compute the cutoff timestamp: anything created earlier is pruned
  local prune_seconds now
  now="$(date '+%s')"
  prune_seconds=$(("${now}"-("${days}" * 86400)))

  # Override repository value to get correct token automatically
  local repository_bak="${repository}"
  repository="${repo_owner}/${prune_repo}"

  debug "deleting %ss over %d days old from %s" "${prune_type}" "${days}" "${repository}"

  local page=$((1))
  while true; do
    local release_list list_length
    release_list=$(github_request -H "Accept: application/vnd.github+json" \
      "https://api.github.com/repos/${repository}/releases?per_page=100&page=${page}") ||
      failure "Failed to request releases list for pruning on ${repository}"
    list_length="$(jq 'length' <( printf "%s" "${release_list}" ))" ||
      failure "Failed to calculate release length for pruning on ${repository}"

    if [ -z "${list_length}" ] || [ "${list_length}" -lt "1" ]; then
      debug "releases listing page %d for %s is empty" "${page}" "${repository}"
      break
    fi

    local entry i release_type release_name release_id release_create date_check
    for (( i=0; i < "${list_length}"; i++ )); do
      entry="$(jq ".[${i}]" <( printf "%s" "${release_list}" ))" ||
        failure "Failed to read entry for pruning on %s" "${repository}"
      release_type="$(jq -r ".${prune_type}" <( printf "%s" "${entry}" ))" ||
        failure "Failed to read entry %s for pruning on %s" "${prune_type}" "${repository}"
      release_name="$(jq -r '.name' <( printf "%s" "${entry}" ))" ||
        failure "Failed to read entry name for pruning on %s" "${repository}"
      release_id="$(jq -r '.id' <( printf "%s" "${entry}" ))" ||
        failure "Failed to read entry ID for pruning on %s" "${repository}"
      release_create="$(jq -r '.created_at' <( printf "%s" "${entry}" ))" ||
        failure "Failed to read entry created date for pruning on %s" "${repository}"
      date_check="$(date --date="${release_create}" '+%s')" ||
        failure "Failed to parse entry created date for pruning on %s" "${repository}"

      if [ "${release_type}" != "true" ]; then
        debug "Skipping %s on %s because release is not a %s" "${release_name}" "${repository}" "${prune_type}"
        continue
      fi

      if [ "$(( "${date_check}" ))" -lt "${prune_seconds}" ]; then
        info "Deleting release %s from %s\n" "${release_name}" "${prune_repo}"
        github_request -X DELETE "https://api.github.com/repos/${repository}/releases/${release_id}" ||
          failure "Failed to prune %s %s from %s" "${prune_type}" "${release_name}" "${repository}"
      fi
    done
    ((page++))
  done

  repository="${repository_bak}" # restore the repository value
}

# Delete all but the latest N number of releases of the provided type
#
# $1: type (prerelease or draft)
# $2: number of releases to retain
# $3: repository name (optional, defaults to current repository name)
function github_release_prune_retain() {
  local prune_type="${1}"
  if [ -z "${prune_type}" ]; then
    failure "Type is required for release pruning"
  fi
  if [ "${prune_type}" != "draft" ] && [ "${prune_type}" != "prerelease" ]; then
    failure "Invalid release pruning type provided '%s' (supported: draft or prerelease)" "${prune_type}"
  fi
  local retain="${2}"
  if [ -z "${retain}" ]; then
    failure "Number of releases to retain is required for pruning"
  fi
  if [[ "${retain}" = *[!0123456789]* ]]; then
    # FIX: this message previously interpolated the undefined ${days}
    # variable instead of the invalid ${retain} value
    failure "Invalid value provided for number of releases to retain when pruning (%s)" "${retain}"
  fi
  local prune_repo="${3:-$repo_name}"
  if [ -z "${prune_repo}" ]; then
    failure "Repository name is required for pruning"
  fi

  # Override repository value to get correct token automatically
  local repository_bak="${repository}"
  repository="${repo_owner}/${prune_repo}"

  debug "pruning all %s type releases except latest %d releases" "${prune_type}" "${retain}"

  local prune_list=()
  local page=$((1))
  while true; do
    local release_list list_length
    # NOTE(review): the releases list endpoint is not documented to accept
    # sort/direction parameters; ordering relies on the API default — confirm
    release_list=$(github_request -H "Accept: application/vnd.github+json" \
      "https://api.github.com/repos/${repository}/releases?per_page=100&page=${page}&sort=created_at&direction=desc") ||
      failure "Failed to request releases list for pruning on ${repository}"
    list_length="$(jq 'length' <( printf "%s" "${release_list}" ))" ||
      failure "Failed to calculate release length for pruning on ${repository}"

    if [ -z "${list_length}" ] || [ "${list_length}" -lt "1" ]; then
      debug "releases listing page %d for %s is empty" "${page}" "${repository}"
      break
    fi

    local entry i release_type release_name release_id release_create date_check
    for (( i=0; i < "${list_length}"; i++ )); do
      entry="$(jq ".[${i}]" <( printf "%s" "${release_list}" ))" ||
        failure "Failed to read entry for pruning on %s" "${repository}"
      release_type="$(jq -r ".${prune_type}" <( printf "%s" "${entry}" ))" ||
        failure "Failed to read entry %s for pruning on %s" "${prune_type}" "${repository}"
      release_name="$(jq -r '.name' <( printf "%s" "${entry}" ))" ||
        failure "Failed to read entry name for pruning on %s" "${repository}"
      release_id="$(jq -r '.id' <( printf "%s" "${entry}" ))" ||
        failure "Failed to read entry ID for pruning on %s" "${repository}"

      if [ "${release_type}" != "true" ]; then
        debug "Skipping %s on %s because release is not a %s" "${release_name}" "${repository}" "${prune_type}"
        continue
      fi

      debug "adding %s '%s' to prune list (ID: %s)" "${prune_type}" "${release_name}" "${release_id}"
      prune_list+=( "${release_id}" )
    done
    (( page++ ))
  done

  local prune_count="${#prune_list[@]}"
  local prune_trim=$(( "${prune_count}" - "${retain}" ))

  # If there won't be any remaining items in the list, bail
  if [ "${prune_trim}" -le 0 ]; then
    debug "no %ss in %s to prune" "${prune_type}" "${repository}"
    repository="${repository_bak}" # restore the repository value
    return 0
  fi

  # Trim down the list to what should be deleted
  prune_list=("${prune_list[@]:$retain:$prune_trim}")

  # Now delete what is left in the list
  local r_id
  for r_id in "${prune_list[@]}"; do
    debug "deleting release (ID: %s) from %s" "${r_id}" "${repository}"
    github_request -X DELETE "https://api.github.com/repos/${repository}/releases/${r_id}" ||
      failure "Failed to prune %s %s from %s" "${prune_type}" "${r_id}" "${repository}"
  done

  repository="${repository_bak}" # restore the repository value
}

# Grab the correct github token to use for authentication.
# The rules used for the token to return are as follows:
#
# * only $GITHUB_TOKEN is set: $GITHUB_TOKEN
# * only $HASHIBOT_TOKEN is set: $HASHIBOT_TOKEN
#
# when both $GITHUB_TOKEN and $HASHIBOT_TOKEN are set:
#
# * $repository value matches $GITHUB_REPOSITORY: $GITHUB_TOKEN
# * $repository value does not match $GITHUB_REPOSITORY: $HASHIBOT_TOKEN
#
# Will return `0` when a token is returned, `1` when no token is returned
function github_token() {
  # No tokens available at all: signal the caller with a failure return
  if [ -z "${GITHUB_TOKEN}" ] && [ -z "${HASHIBOT_TOKEN}" ]; then
    debug "no github or hashibot token set"
    return 1
  fi

  # At least one token is set past this point, so a missing hashibot
  # token means the github token is the only option (and vice versa)
  if [ -z "${HASHIBOT_TOKEN}" ]; then
    debug "only github token set"
    printf "%s\n" "${GITHUB_TOKEN}"
    return 0
  fi
  if [ -z "${GITHUB_TOKEN}" ]; then
    debug "only hashibot token set"
    printf "%s\n" "${HASHIBOT_TOKEN}"
    return 0
  fi

  # Both tokens are set: requests against the original repository use
  # the local token, anything else gets the hashibot token
  if [ "${repository}" = "${GITHUB_REPOSITORY}" ]; then
    debug "prefer github token "
    printf "%s\n" "${GITHUB_TOKEN}"
  else
    printf "%s\n" "${HASHIBOT_TOKEN}"
  fi
  return 0
}

# This function is used to make requests to the GitHub API. It
# accepts the same argument list that would be provided to the
# curl executable. It will check the response status and if a
# 429 is received (rate limited) it will pause until the defined
# rate limit reset time and then try again.
#
# NOTE: Informative information (like rate limit pausing) will
# be printed to stderr. The response body will be printed to
# stdout. Return value of the function will be the exit code
# from the curl process.
function github_request() { local request_exit=0 local info_prefix="__info__" local info_tmpl="${info_prefix}:code=%{response_code}:header=%{size_header}:download=%{size_download}:file=%{filename_effective}" local raw_response_content local curl_cmd=("curl" "-w" "${info_tmpl}" "-i" "-SsL" "--fail") local gtoken # Only add the authentication token if we have one if gtoken="$(github_token)"; then curl_cmd+=("-H" "Authorization: token ${gtoken}") fi # Attach the rest of the arguments curl_cmd+=("${@#}") debug "initial request: %s" "${curl_cmd[*]}" # Make our request raw_response_content="$("${curl_cmd[@]}")" || request_exit="${?}" # Define the status here since we will set it in # the conditional below of something weird happens local status # Check if our response content starts with the info prefix. # If it does, we need to extract the headers from the file. if [[ "${raw_response_content}" = "${info_prefix}"* ]]; then debug "extracting request information from: %s" "${raw_response_content}" raw_response_content="${raw_response_content#"${info_prefix}":code=}" local response_code="${raw_response_content%%:*}" debug "response http code: %s" "${response_code}" raw_response_content="${raw_response_content#*:header=}" local header_size="${raw_response_content%%:*}" debug "response header size: %s" "${header_size}" raw_response_content="${raw_response_content#*:download=}" local download_size="${raw_response_content%%:*}" debug "response file size: %s" "${download_size}" raw_response_content="${raw_response_content#*:file=}" local file_name="${raw_response_content}" debug "response file name: %s" "${file_name}" if [ -f "${file_name}" ]; then # Read the headers from the file and place them in the # raw_response_content to be processed local download_fd exec {download_fd}<"${file_name}" debug "file descriptor created for header grab (source: %s): %q" "${file_name}" "${download_fd}" debug "reading response header content from %s" "${file_name}" read -r -N "${header_size}" -u 
"${download_fd}" raw_response_content # Close our descriptor debug "closing file descriptor: %q" "${download_fd}" exec {download_fd}<&- # Now trim the headers from the file content debug "trimming response header content from %s" "${file_name}" tail -c "${download_size}" "${file_name}" > "${file_name}.trimmed" || failure "Could not trim headers from downloaded file (%s)" "${file_name}" mv -f "${file_name}.trimmed" "${file_name}" || failure "Could not replace downloaded file with trimmed file (%s)" "${file_name}" else debug "expected file not found (%s)" "${file_name}" status="${response_code}" fi else # Since the response wasn't written to a file, trim the # info from the end of the response if [[ "${raw_response_content}" != *"${info_prefix}"* ]]; then debug "github request response does not include information footer" failure "Unexpected error encountered, partial GitHub response returned" fi raw_response_content="${raw_response_content%"${info_prefix}"*}" fi local ratelimit_reset local response_content="" # Read the response into lines for processing local lines mapfile -t lines < <( printf "%s" "${raw_response_content}" ) # Process the lines to extract out status and rate # limit information. 
Populate the response_content # variable with the actual response value local i for (( i=0; i < "${#lines[@]}"; i++ )); do # The line will have a trailing `\r` so just # trim it off local line="${lines[$i]%%$'\r'*}" # strip any leading/trailing whitespace characters read -rd '' line <<< "${line}" if [ -z "${line}" ] && [[ "${status}" = "2"* ]]; then local start="$(( i + 1 ))" local remain="$(( "${#lines[@]}" - "${start}" ))" local response_lines=("${lines[@]:$start:$remain}") response_content="${response_lines[*]}" break fi if [[ "${line}" == "HTTP/"* ]]; then status="${line##* }" debug "http status found: %d" "${status}" fi if [[ "${line}" == "x-ratelimit-reset"* ]]; then ratelimit_reset="${line##*ratelimit-reset: }" debug "ratelimit reset time found: %s" "${ratelimit_reset}" fi done # If the status was not detected, force an error if [ -z "${status}" ]; then failure "Failed to detect response status for GitHub request" fi # If the status was a 2xx code then everything is good # and we can return the response and be done if [[ "${status}" = "2"* ]]; then printf "%s" "${response_content}" return 0 fi # If we are being rate limited, print a notice and then # wait until the rate limit will be reset if [[ "${status}" = "429" ]] || [[ "${status}" = "403" ]]; then debug "request returned %d status, checking for rate limiting" "${status}" # If the ratelimit reset was not detected force an error if [ -z "${ratelimit_reset}" ]; then if [ "${status}" = "403" ]; then failure "Request failed with 403 status response" fi failure "Failed to detect rate limit reset time for GitHub request" fi debug "rate limiting has been detected on request" local reset_date reset_date="$(date --date="@${ratelimit_reset}")" || failure "Failed to GitHub parse ratelimit reset timestamp (${ratelimit_reset})" local now now="$( date '+%s' )" || failure "Failed to get current timestamp in ratelimit check" local reset_wait="$(( "${ratelimit_reset}" - "${now}" + 2))" printf "GitHub rate limit 
encountered, reset at %s (waiting %d seconds)\n" \ "${reset_date}" "${reset_wait}" >&2 sleep "${reset_wait}" || failure "Pause for GitHub rate limited request retry failed" github_request "${@}" return "${?}" fi # At this point we just need to return error information printf "GitHub request returned HTTP status: %d\n" "${status}" >&2 printf "Response body: %s\n" "${response_content}" >&2 return "${request_exit}" } # Lock issues which have been closed for longer than # provided number of days. A date can optionally be # provided which will be used as the earliest date to # search. A message can optionally be provided which # will be added as a comment in the issue before locking. # # -d: number of days # -m: message to include when locking the issue (optional) # -s: date to begin searching from (optional) function lock_issues() { local OPTIND opt days start since message while getopts ":d:s:m:" opt; do case "${opt}" in "d") days="${OPTARG}" ;; "s") start="${OPTARG}" ;; "m") message="${OPTARG}" ;; *) failure "Invalid flag provided to lock_issues" ;; esac done shift $((OPTIND-1)) # If days where not provided, return error if [ -z "${days}" ]; then failure "Number of days since closed required for locking issues" fi # If a start date was provided, check that it is a format we can read if [ -n "${start}" ]; then if ! 
since="$(date --iso-8601=seconds --date="${start}" 2> /dev/null)"; then failure "$(printf "Start date provided for issue locking could not be parsed (%s)" "${start}")" fi fi debug "locking issues that have been closed for at least %d days" "${days}" local req_args=() # Start with basic setup req_args+=("-H" "Accept: application/vnd.github+json") # Add authorization header req_args+=("-H" "Authorization: token ${GITHUB_TOKEN}") # Construct our request endpoint local req_endpoint="https://api.github.com/repos/${repository}/issues" # Page counter for requests local page=$(( 1 )) # Request arguments local req_params=("per_page=20" "state=closed") # If we have a start time, include it if [ -n "${since}" ]; then req_params+=("since=${since}") fi # Compute upper bound for issues we can close local lock_seconds now now="$(date '+%s')" lock_seconds=$(("${now}"-("${days}" * 86400))) while true; do # Join all request parameters with '&' local IFS_BAK="${IFS}" IFS="&" local all_params=("${req_params[*]}" "page=${page}") local params="${all_params[*]}" IFS="${IFS_BAK}" local issue_list issue_count # Make our request to get a page of issues issue_list="$(github_request "${req_args[@]}" "${req_endpoint}?${params}")" || failure "Failed to get repository issue list for ${repository}" issue_count="$(jq 'length' <( printf "%s" "${issue_list}" ))" || failure "Failed to compute count of issues in list for ${repository}" if [ -z "${issue_count}" ] || [ "${issue_count}" -lt 1 ]; then break fi # Iterate through the list local i for (( i=0; i < "${issue_count}"; i++ )); do # Extract the issue we are going to process local issue issue="$(jq ".[${i}]" <( printf "%s" "${issue_list}" ))" || failure "Failed to extract issue from list for ${repository}" # Grab the ID of this issue local issue_id issue_id="$(jq -r '.id' <( printf "%s" "${issue}" ))" || failure "Failed to read ID of issue for ${repository}" # First check if issue is already locked local issue_locked issue_locked="$(jq -r '.locked' 
<( printf "%s" "${issue}" ))" || failure "Failed to read locked state of issue for ${repository}" if [ "${issue_locked}" == "true" ]; then debug "Skipping %s#%s because it is already locked" "${repository}" "${issue_id}" continue fi # Get the closed date local issue_closed issue_closed="$(jq -r '.closed_at' <( printf "%s" "${issue}" ))" || failure "Failed to read closed at date of issue for ${repository}" # Convert closed date to unix timestamp local date_check date_check="$( date --date="${issue_closed}" '+%s' )" || failure "Failed to parse closed at date of issue for ${repository}" # Check if the issue is old enough to be locked if [ "$(( "${date_check}" ))" -lt "${lock_seconds}" ]; then printf "Locking issue %s#%s\n" "${repository}" "${issue_id}" >&2 # If we have a comment to add before locking, do that now if [ -n "${message}" ]; then local message_json message_json=$(jq -n \ --arg msg "$(printf "%b" "${message}")" \ '{body: $msg}' ) || failure "Failed to create issue comment JSON content for ${repository}" debug "adding issue comment before locking on %s#%s" "${repository}" "${issue_id}" github_request "${req_args[@]}" -X POST "${req_endpoint}/${issue_id}/comments" -d "${message_json}" || failure "Failed to create issue comment on ${repository}#${issue_id}" fi # Lock the issue github_request "${req_args[@]}" -X PUT "${req_endpoint}/${issue_id}/lock" -d '{"lock_reason":"resolved"}' || failure "Failed to lock issue ${repository}#${issue_id}" fi done ((page++)) done } # Send a repository dispatch to the defined repository # # $1: repository name # $2: event type (single word string) # $n: "key=value" pairs to build payload (optional) # function github_repository_dispatch() { local drepo_name="${1}" local event_type="${2}" if [ -z "${drepo_name}" ]; then failure "Repository name is required for repository dispatch" fi # shellcheck disable=SC2016 local payload_template='{"vagrant-ci": $vagrant_ci' local jqargs=("--arg" "vagrant_ci" "true") local arg for arg in 
"${@:3}"; do local payload_key="${arg%%=*}" local payload_value="${arg##*=}" payload_template+=", \"${payload_key}\": \$${payload_key}" # shellcheck disable=SC2089 jqargs+=("--arg" "${payload_key}" "${payload_value}") done payload_template+="}" # NOTE: we want the arguments to be expanded below local payload payload=$(jq -n "${jqargs[@]}" "${payload_template}" ) || failure "Failed to generate repository dispatch payload" # shellcheck disable=SC2016 local msg_template='{event_type: $event_type, client_payload: $payload}' local msg msg=$(jq -n \ --argjson payload "${payload}" \ --arg event_type "${event_type}" \ "${msg_template}" \ ) || failure "Failed to generate repository dispatch message" # Update repository value to get correct token local repository_bak="${repository}" repository="${repo_owner}/${drepo_name}" github_request -X "POST" \ -H 'Accept: application/vnd.github.everest-v3+json' \ --data "${msg}" \ "https://api.github.com/repos/${repo_owner}/${drepo_name}/dispatches" || failure "Repository dispatch to ${repo_owner}/${drepo_name} failed" # Restore the repository value repository="${repository_bak}" } # Copy a function to a new name # # $1: Original function name # $2: Copy function name function copy_function() { local orig="${1}" local new="${2}" local fn fn="$(declare -f "${orig}")" || failure "Orignal function (${orig}) not defined" fn="${new}${fn#*"${orig}"}" eval "${fn}" } # Rename a function to a new name # # $1: Original function name # $2: New function name function rename_function() { local orig="${1}" copy_function "${@}" unset -f "${orig}" } # Cleanup wrapper so we get some output that cleanup is starting function _cleanup() { debug "* Running cleanup task..." 
# Always restore this value for cases where a failure # happened within a function while this value was in # a modified state repository="${_repository_backup}" cleanup } # Stub cleanup method which can be redefined # within actual script function cleanup() { debug "** No cleanup tasks defined" } # Only setup our cleanup trap and fail alias when not in testing if [ -z "${BATS_TEST_FILENAME}" ]; then trap _cleanup EXIT # This is a compatibility alias for existing scripts which # use the common.sh library. BATS support defines a `fail` # function so it has been renamed `failure` to prevent the # name collision. When not running under BATS we enable the # `fail` function so any scripts that have not been updated # will not be affected. copy_function "failure" "fail" fi # Make sure the CI bin directory exists if [ ! -d "${ci_bin_dir}" ]; then wrap mkdir -p "${ci_bin_dir}" \ "Failed to create CI bin directory" fi # Always ensure CI bin directory is in PATH if [[ "${PATH}" != *"${ci_bin_dir}"* ]]; then export PATH="${PATH}:${ci_bin_dir}" fi # Enable debugging. This needs to be enabled with # extreme caution when used on public repositories. # Output with debugging enabled will likely include # secret values which should not be publicly exposed. # # If repository is public, FORCE_PUBLIC_DEBUG environment # variable must also be set. priv_args=("-H" "Accept: application/json") # If we have a token available, use it for the check query if [ -n "${HASHIBOT_TOKEN}" ]; then priv_args+=("-H" "Authorization: token ${GITHUB_TOKEN}") elif [ -n "${GITHUB_TOKEN}" ]; then priv_args+=("-H" "Authorization: token ${HASHIBOT_TOKEN}") fi if [ -n "${GITHUB_ACTIONS}" ]; then priv_check="$(curl "${priv_args[@]}" -s "https://api.github.com/repos/${GITHUB_REPOSITORY}" | jq .private)" || failure "Repository visibility check failed" fi # If the value wasn't true we unset it to indicate not private. 
The # repository might actually be private but we weren't supplied a # token (or one with correct permissions) so we fallback to the safe # assumption of not private. if [ "${priv_check}" != "true" ]; then readonly is_public="1" readonly is_private="" else # shellcheck disable=SC2034 readonly is_public="" # shellcheck disable=SC2034 readonly is_private="1" fi # Check if we are running a job created by a tag. If so, # mark this as being a release job and set the release_version if [[ "${GITHUB_REF}" == *"refs/tags/"* ]]; then export tag="${GITHUB_REF##*tags/}" if valid_release_version "${tag}"; then readonly release=1 export release_version="${tag##*v}" else readonly release fi else # shellcheck disable=SC2034 readonly release fi # Seed an initial output file output_file > /dev/null 2>&1 vagrant_cloud-3.1.3/.ci/load-ci.sh000077500000000000000000000011761477154370500167100ustar00rootroot00000000000000#!/usr/bin/env bash # Copyright (c) HashiCorp, Inc. # SPDX-License-Identifier: MPL-2.0 csource="${BASH_SOURCE[0]}" while [ -h "$csource" ] ; do csource="$(readlink "$csource")"; done if ! root="$( cd -P "$( dirname "$csource" )/../" && pwd )"; then echo "⛔ ERROR: Failed to determine root local directory ⛔" >&2 exit 1 fi export root export ci_bin_dir="${root}/.ci/.ci-utility-files" # shellcheck source=/dev/null if ! source "${ci_bin_dir}/common.sh"; then echo "⛔ ERROR: Failed to source Vagrant CI common file ⛔" >&2 exit 1 fi export PATH="${PATH}:${ci_bin_dir}" # And we are done! 
debug "VagrantCI Loaded" vagrant_cloud-3.1.3/.ci/publish000077500000000000000000000016701477154370500164340ustar00rootroot00000000000000#!/usr/bin/env bash export SLACK_USERNAME="Vagrant Cloud RubyGem" export SLACK_ICON="https://avatars.slack-edge.com/2017-10-17/257000837696_070f98107cdacc0486f6_36.png" export SLACK_TITLE="💎 RubyGems Publishing" export SLACK_CHANNEL="#feed-vagrant" csource="${BASH_SOURCE[0]}" while [ -h "$csource" ] ; do csource="$(readlink "$csource")"; done root="$( cd -P "$( dirname "$csource" )/../" && pwd )" . "${root}/.ci/load-ci.sh" pushd "${root}" info "Building vagrant_cloud RubyGem..." build_info="$(gem build vagrant_cloud.gemspec)" || failure "Failed to build vagrant_cloud RubyGem" version="${build_info##*Version: }" version="${version%$'\n'*}" gem_matches=( "${root}"/vagrant_cloud*.gem ) gem_file="${gem_matches[0]}" if [ ! -f "${gem_file}" ]; then failure "Unable to locate vagrant_cloud RubyGem file" fi publish_to_rubygems "${gem_file}" slack -m "New version of vagrant_cloud published: ${version} (file: ${gem_file##*/})" vagrant_cloud-3.1.3/.ci/sync000077500000000000000000000025551477154370500157450ustar00rootroot00000000000000#!/usr/bin/env bash # Copyright (c) HashiCorp, Inc. # SPDX-License-Identifier: MIT csource="${BASH_SOURCE[0]}" while [ -h "$csource" ] ; do csource="$(readlink "$csource")"; done root="$( cd -P "$( dirname "$csource" )/../" && pwd )" . "${root}/.ci/load-ci.sh" pushd "${root}" # Configure for hashibot hashibot_git if [ "${repo_name}" = "vagrant_cloud" ]; then remote_repository="hashicorp/vagrant_cloud-builder" else remote_repository="hashicorp/vagrant_cloud" fi wrap git config pull.rebase false \ "Failed to configure git pull strategy" echo "Adding remote mirror repository '${remote_repository}'..." wrap git remote add mirror "https://${HASHIBOT_USERNAME}:${HASHIBOT_TOKEN}@github.com/${remote_repository}" \ "Failed to add mirror '${remote_repository}' for sync" echo "Updating configured remotes..." 
wrap_stream git remote update mirror \ "Failed to update mirror repository (${remote_repository}) for sync" rb=$(git branch -r --list "mirror/${ident_ref}") if [ "${rb}" != "" ]; then echo "Pulling ${ident_ref} from mirror..." wrap_stream git pull mirror "${ident_ref}" \ "Failed to pull ${ident_ref} from mirror repository (${remote_repository}) for sync" fi echo "Pushing ${ident_ref} to mirror..." wrap_stream git push mirror "${ident_ref}" \ "Failed to sync mirror repository (${remote_repository})" vagrant_cloud-3.1.3/.github/000077500000000000000000000000001477154370500157235ustar00rootroot00000000000000vagrant_cloud-3.1.3/.github/workflows/000077500000000000000000000000001477154370500177605ustar00rootroot00000000000000vagrant_cloud-3.1.3/.github/workflows/disable-pull-requests.yml000066400000000000000000000010411477154370500247250ustar00rootroot00000000000000name: Disable Pull Requests on: pull_request: types: [opened, reopened] jobs: closer: name: Automatic Pull Request Closer if: github.repository == 'hashicorp/vagrant_cloud-builder' runs-on: ubuntu-latest permissions: pull-requests: write steps: - run: | gh pr close $PR -c "This repository is a mirror of hashicorp/vagrant_cloud." 
env: PR: ${{ github.event.pull_request.number }} GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} GH_REPO: ${{ github.repository }} vagrant_cloud-3.1.3/.github/workflows/release.yml000066400000000000000000000025571477154370500221340ustar00rootroot00000000000000name: Release vagrant_cloud RubyGem on: push: branches: - 'release-*' tags: - 'v*' jobs: publish: name: Publish vagrant_cloud RubyGem if: github.repository == 'hashicorp/vagrant_cloud-builder' runs-on: ['self-hosted', 'ondemand', 'linux', 'type=t3.small'] permissions: contents: read id-token: write steps: - name: Authentication id: vault-auth run: vault-auth - name: Secrets id: secrets uses: hashicorp/vault-action@v2 with: url: ${{ steps.vault-auth.outputs.addr }} caCertificate: ${{ steps.vault-auth.outputs.ca_certificate }} token: ${{ steps.vault-auth.outputs.token }} secrets: kv/data/github/hashicorp/vagrant_cloud-builder rubygems_api_key; kv/data/teams/vagrant/slack webhook; - name: Code Checkout uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - name: Setup Ruby uses: ruby/setup-ruby@922ebc4c5262cd14e07bb0e1db020984b6c064fe # v1.226.0 with: ruby-version: 3.1 - name: Publish run: ./.ci/publish env: RUBYGEMS_API_KEY: ${{ steps.secrets.outputs.rubygems_api_key }} GEM_HOST_API_KEY: ${{ steps.secrets.outputs.rubygems_api_key }} SLACK_WEBHOOK: ${{ steps.secrets.outputs.webhook }} vagrant_cloud-3.1.3/.github/workflows/sync.yml000066400000000000000000000014011477154370500214530ustar00rootroot00000000000000name: Repository Code Sync on: push: branches: - 'main' tags: - 'v*' workflow_dispatch: branches: - 'main' jobs: sync-builder: name: Sync builder repository if: github.repository == 'hashicorp/vagrant_cloud' runs-on: ubuntu-latest steps: - name: Code Checkout uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 with: persist-credentials: false fetch-depth: 0 - name: Sync Builder Repository run: ./.ci/sync working-directory: ${{github.workspace}} env: HASHIBOT_TOKEN: ${{ 
secrets.HASHIBOT_TOKEN }} HASHIBOT_USERNAME: ${{ vars.HASHIBOT_USERNAME }} SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }} vagrant_cloud-3.1.3/.github/workflows/testing.yml000066400000000000000000000013741477154370500221650ustar00rootroot00000000000000name: Vagrant Cloud Unit Tests on: push: branches: - main - 'test-*' paths: - 'lib/**' - 'spec/**' pull_request: branches: - main paths: - 'lib/**' - 'spec/**' jobs: unit-tests: runs-on: ubuntu-latest strategy: matrix: ruby: [ '3.1', '3.2', '3.3' ] name: Vagrant Cloud unit tests on Ruby ${{ matrix.ruby }} steps: - name: Code Checkout uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - name: Setup Ruby uses: ruby/setup-ruby@922ebc4c5262cd14e07bb0e1db020984b6c064fe # v1.226.0 with: ruby-version: ${{matrix.ruby}} bundler-cache: true - name: Run Tests run: bundle exec rake vagrant_cloud-3.1.3/.gitignore000066400000000000000000000000451477154370500163520ustar00rootroot00000000000000/Gemfile.lock /vendor /.bundle *.gem vagrant_cloud-3.1.3/CHANGELOG.md000066400000000000000000000055771477154370500162120ustar00rootroot00000000000000# v3.1.3 (March 28, 2025) * Fix box create/update requests [GH-91](https://github.com/hashicorp/vagrant_cloud/pull/91) # v3.1.2 (November 11, 2024) * Add support for HCP authentication [GH-90](https://github.com/hashicorp/vagrant_cloud/pull/88) * Update excon dependency constraint [GH-88](https://github.com/hashicorp/vagrant_cloud/pull/88) * Update REXML dependency constraint [GH-87](https://github.com/hashicorp/vagrant_cloud/pull/87) # v3.1.1 (January 17, 2024) * Update path prefixing behavior on request [GH-85](https://github.com/hashicorp/vagrant_cloud/pull/85) # v3.1.0 (September 22, 2023) * Add support for architecture in providers [GH-82](https://github.com/hashicorp/vagrant_cloud/pull/82) # v3.0.5 (July 22, 2021) * Updates for Ruby 3 compatibility [GH-76](https://github.com/hashicorp/vagrant_cloud/pull/76) # v3.0.4 (March 18, 2021) * Ensure URL is included when saving 
provider [GH-69](https://github.com/hashicorp/vagrant_cloud/pull/69) # v3.0.3 (February 19, 2021) * Save box before saving versions [GH-70](https://github.com/hashicorp/vagrant_cloud/pull/70) # v3.0.2 (October 30, 2020) * Raise custom exception on request error [GH-67](https://github.com/hashicorp/vagrant_cloud/pull/67) # v3.0.1 (October 27, 2020) * Fixes on authentication related client methods [GH-65](https://github.com/hashicorp/vagrant_cloud/pull/65) * Prevent frozen data modifications on deletions [GH-65](https://github.com/hashicorp/vagrant_cloud/pull/65) * Update direct upload callback behaviors [GH-65](https://github.com/hashicorp/vagrant_cloud/pull/65) # v3.0.0 (September 21, 2020) * Refactor library implementation [GH-59](https://github.com/hashicorp/vagrant_cloud/pull/59) * Add support for direct storage uploads [GH-62](https://github.com/hashicorp/vagrant_cloud/pull/62) _NOTE_: This release includes breaking changes and is not backwards compatible # v2.0.3 (October 8, 2019) * Pass access_token and base_url into legacy ensure methods [GH-50](https://github.com/hashicorp/vagrant_cloud/pull/50) * Support passing checksum and checksum type values [GH-51](https://github.com/hashicorp/vagrant_cloud/pull/51) # v2.0.2 (January 9, 2019) * Properly raise error if CLI is invoked [GH-40](https://github.com/hashicorp/vagrant_cloud/pull/40) * Only update Box attribute if non-empty hash is given [GH-44](https://github.com/hashicorp/vagrant_cloud/pull/44) * Raise InvalidVersion error if version number for Version attribute is invalid [GH-45](https://github.com/hashicorp/vagrant_cloud/pull/45) * Fix `ensure_box` when box does not exist [GH-43](https://github.com/hashicorp/vagrant_cloud/pull/43) # v2.0.1 * Remove JSON runtime dependency [GH-39](https://github.com/hashicorp/vagrant_cloud/pull/39) # v2.0.0 * Refactor with updated APIs [GH-35](https://github.com/hashicorp/vagrant_cloud/pull/35) * Use header for authentication token 
[GH-33](https://github.com/hashicorp/vagrant_cloud/pull/33) # v1.1.0 vagrant_cloud-3.1.3/Gemfile000066400000000000000000000000471477154370500156570ustar00rootroot00000000000000source 'https://rubygems.org' gemspec vagrant_cloud-3.1.3/LICENSE000066400000000000000000000021231477154370500153660ustar00rootroot00000000000000The MIT License (MIT) Copyright (c) 2014 Cargo Media Copyright (c) 2017 HashiCorp Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. vagrant_cloud-3.1.3/README.md000066400000000000000000000115121477154370500156420ustar00rootroot00000000000000# vagrant_cloud Ruby client for the [Vagrant Cloud API](https://www.vagrantup.com/docs/vagrant-cloud/api.html). [![Gem Version](https://img.shields.io/gem/v/vagrant_cloud.svg)](https://rubygems.org/gems/vagrant_cloud) This library provides the functionality to create, modify, and delete boxes, versions, and providers on Vagrant Cloud. ## Usage The Vagrant Cloud library provides two methods for interacting with the Vagrant Cloud API. 
The first is direct interaction using a `VagrantCloud::Client` instance. The second is a basic model based approach using a `VagrantCloud::Account` instance. ### Authentication The access token that is used for authenticated requests can be set in one of three ways: * Static access token set directly in the client * Static access token extracted from the `VAGRANT_CLOUD_TOKEN` environment variable * Generated [HCP service principal](https://developer.hashicorp.com/hcp/docs/hcp/iam/service-principal) access token when `HCP_CLIENT_ID` and `HCP_CLIENT_SECRET` environment variables are set ### Direct Client The `VagrantCloud::Client` class contains all the underlying functionality which with `vagrant_cloud` library uses for communicating with Vagrant Cloud. It can be used directly for quickly and easily sending requests to Vagrant Cloud. The `VagrantCloud::Client` class will automatically handle any configured authentication, request parameter structuring, and response validation. All API related methods in the `VagrantCloud::Client` class will return `Hash` results. 
Example usage (display box details): ```ruby require "vagrant_cloud" client = VagrantCloud::Client.new(access_token: "MY_TOKEN") box = client.box_get(username: "hashicorp", name: "bionic64") puts "Box: #{box[:tag]} Description: #{box[:description]}" ``` Example usage (creating box and releasing a new version): ```ruby require "vagrant_cloud" require "net/http" # Create a new client client = VagrantCloud::Client.new(access_token: "MY_TOKEN") # Create a new box client.box_create( username: "hashicorp", name: "test-bionic64", short_description: "Test Box", long_description: "Testing box for an example", is_private: false ) # Create a new version client.box_version_create( username: "hashicorp", name: "test-bionic64", version: "1.0.0", description: "Version 1.0.0 release" ) # Create a new provider client.box_version_provider_create( username: "hashicorp", name: "test-bionic64", version: "1.0.0", provider: "virtualbox" ) # Request box upload URL upload_url = client.box_version_provider_upload( username: "hashicorp", name: "test-bionic64", version: "1.0.0", provider: "virtualbox" ) # Upload box asset uri = URI.parse(upload_url[:upload_path]) request = Net::HTTP::Post.new(uri) box = File.open(BOX_PATH, "rb") request.set_form([["file", box]], "multipart/form-data") response = Net::HTTP.start(uri.hostname, uri.port, use_ssl: uri.scheme.eql?("https")) do |http| http.request(request) end # Release the version client.box_version_release( username: "hashicorp", name: "test-bionic64", version: "1.0.0" ) ``` ### Simple Models The `VagrantCloud::Account` class is the entry point for using simple models to interact with Vagrant Cloud. 
Example usage (display box details): ```ruby require "vagrant_cloud" account = VagrantCloud::Account.new(access_token: "MY_TOKEN") org = account.organization(name: "hashicorp") box = org.boxes.detect { |b| b.name == "bionic64" } puts "Box: #{box[:tag]} Description: #{box[:description]}" ``` Example usage (creating box and releasing a new version): ```ruby require "vagrant_cloud" # Load our account account = VagrantCloud::Account.new(access_token: "MY_TOKEN") # Load organization org = account.organization(name: "hashicorp") # Create a new box box = org.add_box("test-bionic64") box.description = "Testing box for an example" box.short_description = "Test Box" # Create a new version version = box.add_version("1.0.0") version.description = "Version 1.0.0 release" # Create a new provider provider = version.add_provider("virtualbox") # Save the box, version, and provider box.save # Upload box asset provider.upload(path: BOX_PATH) # Release the version version.release ``` ## Development & Contributing Pull requests are very welcome! Install dependencies: ``` bundle install ``` Run the tests: ``` bundle exec rspec ``` ## Releasing Release a new version: 1. Update the version in the `version.txt` file 1. Commit the change to master 1. Create a new version tag in git: `git tag vX.X.X` 1. Push the new tag and master to GitHub `git push origin main --tags` The new release will be automatically built and published. ## History - This gem was developed and maintained by [Cargo Media](https://www.cargomedia.ch) from April 2014 until October 2017. - The `vagrant_cloud` CLI tool included in this RubyGem has been deprecated and removed. See `vagrant cloud` for a replacement. 
vagrant_cloud-3.1.3/Rakefile000066400000000000000000000002741477154370500160330ustar00rootroot00000000000000$LOAD_PATH.unshift File.expand_path('lib', __dir__) require 'bundler/gem_tasks' Dir.glob(File.expand_path('tasks/*.rake', __dir__)).each do |task| load task end task default: [:spec] vagrant_cloud-3.1.3/lib/000077500000000000000000000000001477154370500151315ustar00rootroot00000000000000vagrant_cloud-3.1.3/lib/vagrant_cloud.rb000066400000000000000000000012461477154370500203110ustar00rootroot00000000000000require "excon" require "log4r" require "json" require "securerandom" require "set" require 'singleton' require "thread" module VagrantCloud autoload :Account, "vagrant_cloud/account" autoload :Auth, "vagrant_cloud/auth" autoload :Box, "vagrant_cloud/box" autoload :Client, "vagrant_cloud/client" autoload :Data, "vagrant_cloud/data" autoload :Error, "vagrant_cloud/error" autoload :Instrumentor, "vagrant_cloud/instrumentor" autoload :Logger, "vagrant_cloud/logger" autoload :Organization, "vagrant_cloud/organization" autoload :Response, "vagrant_cloud/response" autoload :Search, "vagrant_cloud/search" autoload :VERSION, "vagrant_cloud/version" end vagrant_cloud-3.1.3/lib/vagrant_cloud/000077500000000000000000000000001477154370500177615ustar00rootroot00000000000000vagrant_cloud-3.1.3/lib/vagrant_cloud/account.rb000066400000000000000000000065341477154370500217520ustar00rootroot00000000000000module VagrantCloud # VagrantCloud account class Account # @return [Client] attr_reader :client # @return [String] username of this account attr_reader :username # @return [Instrumentor::Collection] Instrumentor in use attr_reader :instrumentor # Create a new Account instance # # @param [String] access_token Authentication token # @param [Client] client Client to use for account # @param [String] custom_server Custom server URL for client # @param [Integer] retry_count Number of retries on idempotent requests # @param [Integer] retry_interval Number of seconds to wait between 
requests # @param [Instrumentor::Core] instrumentor Instrumentor to use # @return [Account] def initialize(access_token: nil, client: nil, custom_server: nil, retry_count: nil, retry_interval: nil, instrumentor: nil) raise ArgumentError, "Account accepts `access_token` or `client` but not both" if client && access_token raise TypeError, "Expected `#{Client.name}` but received `#{client.class.name}`" if client && !client.is_a?(Client) if client @client = client else @client = Client.new( access_token: access_token, url_base: custom_server, retry_count: retry_count, retry_interval: retry_interval, instrumentor: instrumentor ) end setup! end # @return [Search] def searcher Search.new(account: self) end #--------------------------- # Authentication API Helpers #--------------------------- # Create a new access token # @param [String] password Remote password # @param [String] description Description of token # @param [String] code 2FA code # @return [Response::CreateToken] def create_token(password:, description: Data::Nil, code: Data::Nil) r = client.authentication_token_create(username: username, password: password, description: description, code: code) Response::CreateToken.new( token: r[:token], token_hash: r[:token_hash], created_at: r[:created_at], description: r[:description] ) end # Delete the current token # # @return [self] def delete_token client.authentication_token_delete self end # Validate the current token # # @return [self] def validate_token client.authentication_token_validate self end # Request a 2FA code is sent # # @param [String] delivery_method Delivery method of 2FA # @param [String] password Account password # @return [Response] def request_2fa_code(delivery_method:, password:) r = client.authentication_request_2fa_code(username: username, password: password, delivery_method: delivery_method) Response::Request2FA.new(destination: r.dig(:two_factor, :obfuscated_destination)) end # Fetch the requested organization # # @param [String] name 
Organization name # @return [Organization] def organization(name: nil) org_name = name || username r = client.organization_get(name: org_name) Organization.load(account: self, **r) end protected def setup! if client.access_token r = client.authentication_token_validate @username = r.dig(:user, :username) end end end end vagrant_cloud-3.1.3/lib/vagrant_cloud/auth.rb000066400000000000000000000107421477154370500212530ustar00rootroot00000000000000require "oauth2" module VagrantCloud class Auth # Default authentication URL DEFAULT_AUTH_URL = "https://auth.idp.hashicorp.com".freeze # Default authorize path DEFAULT_AUTH_PATH = "/oauth2/auth".freeze # Default token path DEFAULT_TOKEN_PATH = "/oauth2/token".freeze # Number of seconds to pad token expiry TOKEN_EXPIRY_PADDING = 5 # HCP configuration for generating authentication tokens # # @param [String] client_id Service principal client ID # @param [String] client_secret Service principal client secret # @param [String] auth_url Authentication URL end point # @param [String] auth_path Authorization path (relative to end point) # @param [String] token_path Token path (relative to end point) HCPConfig = Struct.new(:client_id, :client_secret, :auth_url, :auth_path, :token_path, keyword_init: true) do # Raise exception if any values are missing def validate! [:client_id, :client_secret, :auth_url, :auth_path, :token_path].each do |name| raise ArgumentError, "Missing required HCP authentication configuration value: HCP_#{name.to_s.upcase}" if self.send(name).to_s.empty? end end end # HCP token # # @param [String] token HCP token value # @param [Integer] expires_at Epoch seconds HCPToken = Struct.new(:token, :expires_at, keyword_init: true) do # Raise exception if any values are missing def validate! [:token, :expires_at].each do |name| raise ArgumentError, "Missing required token value - #{name.inspect}" if self.send(name).nil? 
end end # @return [Boolean] token is expired # @note Will show token as expired TOKEN_EXPIRY_PADDING # seconds prior to actual expiry def expired? validate! Time.now.to_i > (expires_at - TOKEN_EXPIRY_PADDING) end # @return [Boolean] token is not expired def valid? !expired? end end # Create a new auth instance # # @param [String] access_token Static access token # @note If no access token is provided, the token will be extracted # from the VAGRANT_CLOUD_TOKEN environment variable. If that value # is not set, the HCP_CLIENT_ID and HCP_CLIENT_SECRET environment # variables will be checked. If found, tokens will be generated as # needed using the client id and secret. Otherwise, no token will # will be available. def initialize(access_token: nil) @token = access_token # The Vagrant Cloud token has precedence over # anything else, so if it is set then it is # the only value used. @token = ENV["VAGRANT_CLOUD_TOKEN"] if @token.nil? # If there is no token set, attempt to load HCP configuration if @token.to_s.empty? && (ENV["HCP_CLIENT_ID"] || ENV["HCP_CLIENT_SECRET"]) @config = HCPConfig.new( client_id: ENV["HCP_CLIENT_ID"], client_secret: ENV["HCP_CLIENT_SECRET"], auth_url: ENV.fetch("HCP_AUTH_URL", DEFAULT_AUTH_URL), auth_path: ENV.fetch("HCP_AUTH_PATH", DEFAULT_AUTH_PATH), token_path: ENV.fetch("HCP_TOKEN_PATH", DEFAULT_TOKEN_PATH) ) # Validate configuration is populated @config.validate! end end # @return [String] authentication token def token # If a static token is defined, use that value return @token if @token # If no configuration is set, there is no auth to provide return if @config.nil? # If an HCP token exists and is not expired return @hcp_token.token if @hcp_token&.valid? # Generate a new HCP token refresh_token! @hcp_token.token end # @return [Boolean] Authentication token is available def available? !!(@token || @config) end private # Refresh the HCP oauth2 token. # @todo rescue exceptions and make them nicer def refresh_token! 
client = OAuth2::Client.new( @config.client_id, @config.client_secret, site: @config.auth_url, authorize_url: @config.auth_path, token_url: @config.token_path, ) begin response = client.client_credentials.get_token @hcp_token = HCPToken.new( token: response.token, expires_at: response.expires_at, ) rescue OAuth2::Error => err raise Error::ClientError::AuthenticationError, err.response.body.chomp, err.response.status end end end end vagrant_cloud-3.1.3/lib/vagrant_cloud/box.rb000066400000000000000000000074441477154370500211070ustar00rootroot00000000000000module VagrantCloud class Box < Data::Mutable autoload :Provider, "vagrant_cloud/box/provider" autoload :Version, "vagrant_cloud/box/version" attr_reader :organization attr_required :name attr_optional :created_at, :updated_at, :tag, :short_description, :description_html, :description_markdown, :private, :downloads, :current_version, :versions, :description, :username attr_mutable :short_description, :description, :private, :versions # Create a new instance # # @return [Box] def initialize(organization:, **opts) @organization = organization @versions_loaded = false opts[:username] = organization.username super(**opts) if opts[:versions] && !opts[:versions].empty? self.versions= Array(opts[:versions]).map do |version| Box::Version.load(box: self, **version) end end if opts[:current_version] clean(data: {current_version: Box::Version. load(box: self, **opts[:current_version])}) end clean! end # Delete this box # # @return [nil] # @note This will delete the box, and all versions def delete if exist? organization.account.client.box_delete( username: username, name: name ) b = organization.boxes.dup b.delete(self) organization.clean(data: {boxes: b}) end nil end # Add a new version of this box # # @param [String] version Version number # @return [Version] def add_version(version) if versions.any? 
{ |v| v.version == version } raise Error::BoxError::VersionExistsError, "Version #{version} already exists for box #{tag}" end v = Version.new(box: self, version: version) clean(data: {versions: versions + [v]}) v end # Check if this instance is dirty # # @param [Boolean] deep Check nested instances # @return [Boolean] instance is dirty def dirty?(key=nil, deep: false) if key super(key) else d = super() || !exist? if deep && !d d = Array(plain_versions).any? { |v| v.dirty?(deep: true) } end d end end # @return [Boolean] box exists remotely def exist? !!created_at end # @return [Array] # @note This is used to allow versions information to be loaded # only when requested def versions_on_demand if !@versions_loaded if exist? r = self.organization.account.client.box_get(username: username, name: name) v = Array(r[:versions]).map do |version| Box::Version.load(box: self, **version) end clean(data: {versions: v + Array(plain_versions)}) else clean(data: {versions: []}) end @versions_loaded = true end plain_versions end alias_method :plain_versions, :versions alias_method :versions, :versions_on_demand # Save the box if any changes have been made # # @return [self] def save save_box if dirty? save_versions if dirty?(deep: true) self end protected # Save the box # # @return [self] def save_box req_args = { username: username, name: name, short_description: short_description, description: description, is_private: self.private } if exist? 
result = organization.account.client.box_update(**req_args) else result = organization.account.client.box_create(**req_args) end clean(data: result, ignores: [:current_version, :versions]) self end # Save the versions if any require saving # # @return [self] def save_versions versions.map(&:save) self end end end vagrant_cloud-3.1.3/lib/vagrant_cloud/box/000077500000000000000000000000001477154370500205515ustar00rootroot00000000000000vagrant_cloud-3.1.3/lib/vagrant_cloud/box/provider.rb000066400000000000000000000145161477154370500227370ustar00rootroot00000000000000module VagrantCloud class Box class Provider < Data::Mutable # Result for upload requests to upload directly to the # storage backend. # # @param [String] upload_url URL for uploading file asset # @param [String] callback_url URL callback to PUT after successful upload # @param [Proc] callback Callable proc to perform callback via configured client DirectUpload = Struct.new(:upload_url, :callback_url, :callback, keyword_init: true) attr_reader :version attr_required :name attr_optional :hosted, :created_at, :updated_at, :checksum, :checksum_type, :original_url, :download_url, :url, :architecture, :default_architecture attr_mutable :url, :checksum, :checksum_type, :architecture, :default_architecture def initialize(version:, **opts) if !version.is_a?(Version) raise TypeError, "Expecting type `#{Version.name}` but received `#{version.class.name}`" end @version = version super(**opts) end # Delete this provider # # @return [nil] def delete if exist? version.box.organization.account.client.box_version_provider_delete( username: version.box.username, name: version.box.name, version: version.version, provider: name, architecture: architecture ) pv = version.providers.dup pv.delete(self) version.clean(data: {providers: pv}) end nil end # Upload box file to be hosted on VagrantCloud. This # method provides different behaviors based on the # parameters passed. 
When the `direct` option is enabled # the upload target will be directly to the backend # storage. However, when the `direct` option is used the # upload process becomes a two steps where a callback # must be called after the upload is complete. # # If the path is provided, the file will be uploaded # and the callback will be requested if the `direct` # option is enabled. # # If a block is provided, the upload URL will be yielded # to the block. If the `direct` option is set, the callback # will be automatically requested after the block execution # has completed. # # If no path or block is provided, the upload URL will # be returned. If the `direct` option is set, the # `DirectUpload` instance will be yielded and it is # the caller's responsibility to issue the callback # # @param [String] path Path to asset # @param [Boolean] direct Upload directly to backend storage # @yieldparam [String] url URL to upload asset # @return [self, Object, String, DirectUpload] self when path provided, result of yield when block provided, URL otherwise # @note The callback request uses PUT request method def upload(path: nil, direct: false) if !exist? raise Error::BoxError::ProviderNotFoundError, "Provider #{name} not found for box #{version.box.tag} version #{version.version}" end if path && block_given? raise ArgumentError, "Only path or block may be provided, not both" end if path && !File.exist?(path) raise Errno::ENOENT, path end req_args = { username: version.box.username, name: version.box.name, version: version.version, provider: name, architecture: architecture, } if direct r = version.box.organization.account.client.box_version_provider_upload_direct(**req_args) else r = version.box.organization.account.client.box_version_provider_upload(**req_args) end result = DirectUpload.new( upload_url: r[:upload_path], callback_url: r[:callback], callback: proc { if r[:callback] version.box.organization.account.client. 
request(method: :put, path: URI.parse(r[:callback]).path) end } ) if block_given? block_r = yield result.upload_url result[:callback].call block_r elsif path File.open(path, "rb") do |file| chunks = lambda { file.read(Excon.defaults[:chunk_size]).to_s } Excon.put(result.upload_url, request_block: chunks) end result[:callback].call self else # When returning upload information for requester to complete, # return upload URL when `direct` option is false, otherwise # return the `DirectUpload` instance direct ? result : result.upload_url end end # @return [Boolean] provider exists remotely def exist? !!created_at end # Check if this instance is dirty # # @param [Boolean] deep Check nested instances # @return [Boolean] instance is dirty def dirty?(key=nil, **args) if key super(key) else super || !exist? end end # Save the provider if any changes have been made # # @return [self] def save save_provider if dirty? self end protected # Save the provider # # @return [self] def save_provider req_args = { username: version.box.username, name: version.box.name, version: version.version, provider: name, checksum: checksum, checksum_type: checksum_type, architecture: architecture, default_architecture: default_architecture, url: url } if exist? 
# If the provider already exists, use the original architecture # value for locating the existing record and use the current # architecture for the new_architecture value so it can be updated # properly req_args[:architecture] = data[:architecture] req_args[:new_architecture] = architecture result = version.box.organization.account.client.box_version_provider_update(**req_args) else result = version.box.organization.account.client.box_version_provider_create(**req_args) end clean(data: result) self end end end end vagrant_cloud-3.1.3/lib/vagrant_cloud/box/version.rb000066400000000000000000000107101477154370500225620ustar00rootroot00000000000000module VagrantCloud class Box class Version < Data::Mutable attr_reader :box attr_required :version attr_optional :status, :description_html, :description_markdown, :created_at, :updated_at, :number, :providers, :description attr_mutable :description def initialize(box:, **opts) if !box.is_a?(Box) raise TypeError, "Expecting type `#{Box.name}` but received `#{box.class.name}`" end @box = box opts[:providers] = Array(opts[:providers]).map do |provider| if provider.is_a?(Provider) provider else Provider.load(version: self, **provider) end end super(**opts) clean! end # Delete this version # # @return [nil] # @note This will delete the version, and all providers def delete if exist? box.organization.account.client.box_version_delete( username: box.username, name: box.name, version: version ) # Remove self from box v = box.versions.dup v.delete(self) box.clean(data: {versions: v}) end nil end # Release this version # # @return [self] def release if released? raise Error::BoxError::VersionStatusChangeError, "Version #{version} is already released for box #{box.tag}" end if !exist? 
raise Error::BoxError::VersionStatusChangeError, "Version #{version} for box #{box.tag} must be saved before release" end result = box.organization.account.client.box_version_release( username: box.username, name: box.name, version: version ) clean(data: result, only: :status) self end # Revoke this version # # @return [self] def revoke if !released? raise Error::BoxError::VersionStatusChangeError, "Version #{version} is not yet released for box #{box.tag}" end result = box.organization.account.client.box_version_revoke( username: box.username, name: box.name, version: version ) clean(data: result, only: :status) self end # @return [Boolean] def released? status == "active" end # Add a new provider for this version # # @param [String] pname Name of provider # @return [Provider] def add_provider(pname, architecture=nil) if providers.any? { |p| p.name == pname && (architecture.nil? || p.architecture == architecture) } raise Error::BoxError::VersionProviderExistsError, "Provider #{pname} already exists for box #{box.tag} version #{version} (#{architecture})" end pv = Provider.new( version: self, name: pname, ) pv.architecture = architecture if architecture clean(data: {providers: providers + [pv]}) pv end # Check if this instance is dirty # # @param [Boolean] deep Check nested instances # @return [Boolean] instance is dirty def dirty?(key=nil, deep: false) if key super(key) else d = super() || !exist? if deep && !d d = providers.any? { |p| p.dirty?(deep: true) } end d end end # @return [Boolean] version exists remotely def exist? !!created_at end # Save the version if any changes have been made # # @return [self] def save save_version if dirty? save_providers if dirty?(deep: true) self end protected # Save the version # # @return [self] def save_version params = { username: box.username, name: box.name, version: version, description: description } if exist? 
result = box.organization.account.client.box_version_update(**params) else result = box.organization.account.client.box_version_create(**params) end clean(data: result, ignores: :providers) self end # Save the providers if any require saving # # @return [self] def save_providers Array(providers).map(&:save) self end end end end vagrant_cloud-3.1.3/lib/vagrant_cloud/client.rb000066400000000000000000000500261477154370500215670ustar00rootroot00000000000000module VagrantCloud class Client include Logger # Path to the v1 API API_V1_PATH = "/api/v1".freeze # Path to the v2 API API_V2_PATH = "/api/v2".freeze # Default host URL API_DEFAULT_URL = "https://vagrantcloud.com".freeze # Valid methods that can be retried IDEMPOTENT_METHODS = [:get, :head].freeze # Number or allowed retries IDEMPOTENT_RETRIES = 3 # Number of seconds to wait between retries IDEMPOTENT_RETRY_INTERVAL = 2 # Methods which require query parameters QUERY_PARAMS_METHODS = [:get, :head, :delete].freeze # Default instrumentor DEFAULT_INSTRUMENTOR = Instrumentor::Collection.new # @return [Instrumentor::Collection] def self.instrumentor DEFAULT_INSTRUMENTOR end # @return [String] Base request path attr_reader :path_base # @return [String] URL for initializing connection attr_reader :url_base # @return [Integer] Number of retries on idempotent requests attr_reader :retry_count # @return [Integer] Number of seconds to wait between requests attr_reader :retry_interval # @return [Instrumentor::Collection] Instrumentor in use attr_reader :instrumentor # Create a new Client instance # # @param [String] access_token Authentication token for API requests # @param [String] url_base URL used to make API requests # @param [Integer] retry_count Number of retries on idempotent requests # @param [Integer] retry_interval Number of seconds to wait between requests # @param [Instrumentor::Core] instrumentor Instrumentor to use # @return [Client] def initialize(access_token: nil, url_base: nil, retry_count: nil, 
retry_interval: nil, instrumentor: nil) url_base = API_DEFAULT_URL if url_base.nil? remote_url = URI.parse(url_base) @url_base = "#{remote_url.scheme}://#{remote_url.host}" @path_base = remote_url.path if @path_base.empty? || @path_base == API_V1_PATH || @path_base == API_V2_PATH @path_base = nil end @auth = Auth.new(access_token: access_token) @retry_count = retry_count.nil? ? IDEMPOTENT_RETRIES : retry_count.to_i @retry_interval = retry_interval.nil? ? IDEMPOTENT_RETRY_INTERVAL : retry_interval.to_i @instrumentor = instrumentor.nil? ? Instrumentor::Collection.new : instrumentor headers = {}.tap do |h| h["Accept"] = "application/json" h["Content-Type"] = "application/json" end @connection_lock = Mutex.new @connection = Excon.new(url_base, headers: headers, instrumentor: @instrumentor ) end # @return [String] Access token for Vagrant Cloud def access_token @auth.token end # Use the remote connection # # @param [Boolean] wait Wait for the connection to be available # @yieldparam [Excon::Connection] # @return [Object] def with_connection(wait: true) raise ArgumentError, "Block expected but no block given" if !block_given? # Adds authentication header to connection if available set_authentication = ->(conn) { if @auth.available? 
conn.connection[:headers]["Authorization"] = "Bearer #{@auth.token}" end } if !wait raise Error::ClientError::ConnectionLockedError, "Connection is currently locked" if !@connection_lock.try_lock set_authentication.call(@connection) begin yield @connection ensure @connection_lock.unlock end else @connection_lock.synchronize do set_authentication.call(@connection) yield @connection end end end # Send a request # @param [String, Symbol] method Request method # @param [String, URI] path Path of request # @param [Hash] params Parameters to send with request # @return [Hash] def request(path:, method: :get, params: {}, api_version: 2) # Apply any path modifications that are required catch(:done) do # If a base path is defined, and the provided path # is already properly prefixed with it, do nothing. throw :done if !path_base.nil? && path.start_with?(path_base) # If the path does not include an API version # prefix, add it now. if !path.start_with?(API_V1_PATH) && !path.start_with?(API_V2_PATH) case api_version when 1 start_path = API_V1_PATH when 2 start_path = API_V2_PATH else raise ArgumentError, "Unsupported API version provided" end end path = [path_base, start_path, path].compact.join("/").gsub(/\/{2,}/, "/") end method = method.to_s.downcase.to_sym # Build base request parameters request_params = { method: method, path: path, expects: [200, 201, 204] } # If this is an idempotent request allow it to retry on failure if IDEMPOTENT_METHODS.include?(method) request_params[:idempotent] = true request_params[:retry_limit] = retry_count request_params[:retry_interval] = retry_interval end # If parameters are provided, set them in the expected location if !params.empty? 
# Copy the parameters so we can freely modify them params = clean_parameters(params) if QUERY_PARAMS_METHODS.include?(method) request_params[:query] = params else request_params[:body] = JSON.dump(params) end end # Set a request ID so we can track request/responses request_params[:headers] = {"X-Request-Id" => SecureRandom.uuid} begin result = with_connection { |c| c.request(request_params) } rescue Excon::Error::HTTPStatus => err raise Error::ClientError::RequestError.new( "Vagrant Cloud request failed", err.response.body, err.response.status) rescue Excon::Error => err raise Error::ClientError, err.message end parse_json(result.body) end # Clone this client to create a new instance # # @param [String] access_token Authentication token for API requests # @return [Client] def clone(access_token: nil) self.class.new(access_token: access_token, url_base: url_base, retry_count: retry_count, retry_interval: retry_interval ) end # Submit a search on Vagrant Cloud # # @param [String] query Search query # @param [String] architecture Limit results to only this architecture # @param [String] provider Limit results to only this provider # @param [String] sort Field to sort results ("downloads", "created", or "updated") # @param [String] order Order to return sorted result ("desc" or "asc") # @param [Integer] limit Number of results to return # @param [Integer] page Page number of results to return # @return [Hash] def search(query: Data::Nil, architecture: Data::Nil, provider: Data::Nil, sort: Data::Nil, order: Data::Nil, limit: Data::Nil, page: Data::Nil) params = { q: query, architecture: architecture, provider: provider, sort: sort, order: order, limit: limit, page: page } request(method: :get, path: "search", params: params) end # Create a new access token # # @param [String] username Vagrant Cloud username # @param [String] password Vagrant Cloud password # @param [String] description Description of token # @param [String] code 2FA code # @return [Hash] def 
authentication_token_create(username:, password:, description: Data::Nil, code: Data::Nil) params = { user: { login: username, password: password }, token: { description: description }, two_factor: { code: code } } request(method: :post, path: "authenticate", params: params, api_version: 1) end # Delete the token currently in use # # @return [Hash] empty def authentication_token_delete request(method: :delete, path: "authenticate", api_version: 1) end # Request a 2FA code is sent # # @param [String] username Vagrant Cloud username # @param [String] password Vagrant Cloud password # @param [String] delivery_method Delivery method of 2FA # @param [String] password Account password # @return [Hash] def authentication_request_2fa_code(username:, password:, delivery_method:) params = { two_factor: { delivery_method: delivery_method }, user: { login: username, password: password } } request(method: :post, path: "two-factor/request-code", params: params, api_version: 1) end # Validate the current token # # @return [Hash] emtpy def authentication_token_validate request(method: :get, path: "authenticate") end # Get an organization # # @param [String] name Name of organization # @return [Hash] organization information def organization_get(name:) request(method: :get, path: "user/#{name}") end # Get an existing box # # @param [String] username Username/organization name to create box under # @param [String] name Box name # @return [Hash] box information def box_get(username:, name:) request(method: :get, path: "/box/#{username}/#{name}") end # Create a new box # # @param [String] username Username/organization name to create box under # @param [String] name Box name # @param [String] short_description Short description of box # @param [String] description Long description of box (markdown supported) # @param [Boolean] is_private Set if box is private # @return [Hash] box information def box_create(username:, name:, short_description: Data::Nil, description: Data::Nil, 
is_private: Data::Nil) request(method: :post, path: '/boxes', params: { box: { username: username, name: name, short_description: short_description, description: description, is_private: is_private } }) end # Update an existing box # # @param [String] username Username/organization name to create box under # @param [String] name Box name # @param [String] short_description Short description of box # @param [String] description Long description of box (markdown supported) # @param [Boolean] is_private Set if box is private # @return [Hash] box information def box_update(username:, name:, short_description: Data::Nil, description: Data::Nil, is_private: Data::Nil) params = { box: { short_description: short_description, description: description, is_private: is_private } } request(method: :put, path: "/box/#{username}/#{name}", params: params) end # Delete an existing box # # @param [String] username Username/organization name to create box under # @param [String] name Box name # @return [Hash] box information def box_delete(username:, name:) request(method: :delete, path: "/box/#{username}/#{name}") end # Get an existing box version # # @param [String] username Username/organization name to create box under # @param [String] name Box name # @param [String] version Box version # @return [Hash] box version information def box_version_get(username:, name:, version:) request(method: :get, path: "/box/#{username}/#{name}/version/#{version}") end # Create a new box version # # @param [String] username Username/organization name to create box under # @param [String] name Box name # @param [String] version Box version # @param [String] description Box description # @return [Hash] box version information def box_version_create(username:, name:, version:, description: Data::Nil) request(method: :post, path: "/box/#{username}/#{name}/versions", params: { version: { version: version, description: description } }) end # Update an existing box version # # @param [String] username 
Username/organization name to create box under # @param [String] name Box name # @param [String] version Box version # @param [String] description Box description # @return [Hash] box version information def box_version_update(username:, name:, version:, description: Data::Nil) params = { version: { version: version, description: description } } request(method: :put, path: "/box/#{username}/#{name}/version/#{version}", params: params) end # Delete an existing box version # # @param [String] username Username/organization name to create box under # @param [String] name Box name # @param [String] version Box version # @return [Hash] box version information def box_version_delete(username:, name:, version:) request(method: :delete, path: "/box/#{username}/#{name}/version/#{version}") end # Release an existing box version # # @param [String] username Username/organization name to create box under # @param [String] name Box name # @param [String] version Box version # @return [Hash] box version information def box_version_release(username:, name:, version:) request(method: :put, path: "/box/#{username}/#{name}/version/#{version}/release") end # Revoke an existing box version # # @param [String] username Username/organization name to create box under # @param [String] name Box name # @param [String] version Box version # @return [Hash] box version information def box_version_revoke(username:, name:, version:) request(method: :put, path: "/box/#{username}/#{name}/version/#{version}/revoke") end # Get an existing box version provider # # @param [String] username Username/organization name to create box under # @param [String] name Box name # @param [String] version Box version # @param [String] provider Provider name # @param [String] architecture Architecture name # @return [Hash] box version provider information def box_version_provider_get(username:, name:, version:, provider:, architecture: nil) req_path = ["/box", username, name, "version", version, "provider", 
provider, architecture].compact.join("/") api_version = architecture.nil? ? 1 : 2 request(method: :get, path: req_path, api_version: api_version) end # Create a new box version provider # # @param [String] username Username/organization name to create box under # @param [String] name Box name # @param [String] version Box version # @param [String] provider Provider name # @param [String] architecture Architecture name # @param [Boolean] default_architecture Flag architecture as default in named provider group # @param [String] url Remote URL for box download # @return [Hash] box version provider information def box_version_provider_create(username:, name:, version:, provider:, architecture: nil, default_architecture: Data::Nil, url: Data::Nil, checksum: Data::Nil, checksum_type: Data::Nil) provider_params = { name: provider, url: url, checksum: checksum, checksum_type: checksum_type } if architecture.nil? api_version = 1 else api_version = 2 provider_params.merge!( architecture: architecture, default_architecture: default_architecture ) end request( method: :post, path: "/box/#{username}/#{name}/version/#{version}/providers", params: { provider: provider_params }, api_version: api_version ) end # Update an existing box version provider # # @param [String] username Username/organization name to create box under # @param [String] name Box name # @param [String] version Box version # @param [String] provider Provider name # @param [String] architecture Current architecture name # @param [String] new_architecture New architecture name to apply # @param [String] url Remote URL for box download # @return [Hash] box version provider information def box_version_provider_update(username:, name:, version:, provider:, architecture: nil, new_architecture: Data::Nil, default_architecture: Data::Nil, url: Data::Nil, checksum: Data::Nil, checksum_type: Data::Nil) provider_params = { name: provider, url: url, checksum: checksum, checksum_type: checksum_type } if architecture.nil? 
api_version = 1 else api_version = 2 provider_params.merge!( architecture: new_architecture, default_architecture: default_architecture ) end req_path = ["/box", username, name, "version", version, "provider", provider, architecture].compact.join("/") request(method: :put, path: req_path, params: {provider: provider_params}, api_version: api_version) end # Delete an existing box version provider # # @param [String] username Username/organization name to create box under # @param [String] name Box name # @param [String] version Box version # @param [String] provider Provider name # @param [String] architecture Architecture name # @return [Hash] box version provider information def box_version_provider_delete(username:, name:, version:, provider:, architecture: nil) req_path = ["/box", username, name, "version", version, "provider", provider, architecture].compact.join("/") api_version = architecture.nil? ? 1 : 2 request(method: :delete, path: req_path, api_version: api_version) end # Upload a box asset for an existing box version provider # # @param [String] username Username/organization name to create box under # @param [String] name Box name # @param [String] version Box version # @param [String] provider Provider name # @param [String] architecture Architecture name # @return [Hash] box version provider upload information (contains upload_path entry) def box_version_provider_upload(username:, name:, version:, provider:, architecture: nil) req_path = ["/box", username, name, "version", version, "provider", provider, architecture, "upload"].compact.join("/") api_version = architecture.nil? ? 
1 : 2 request(method: :get, path: req_path, api_version: api_version) end # Upload a box asset directly to the backend storage for an existing box version provider # # @param [String] username Username/organization name to create box under # @param [String] name Box name # @param [String] version Box version # @param [String] provider Provider name # @param [String] architecture Architecture name # @return [Hash] box version provider upload information (contains upload_path and callback entries) def box_version_provider_upload_direct(username:, name:, version:, provider:, architecture: nil) req_path = ["/box", username, name, "version", version, "provider", provider, architecture, "upload/direct"].compact.join("/") api_version = architecture.nil? ? 1 : 2 request(method: :get, path: req_path, api_version: api_version) end protected # Parse a string of JSON # # @param [String] string String of JSON data # @return [Object] # @note All keys are symbolized when parsed def parse_json(string) return {} if string.empty? JSON.parse(string, symbolize_names: true) end # Remove any values that have a default value set # # @param [Object] item Item to clean # @return [Object] cleaned item def clean_parameters(item) case item when Array item = item.find_all { |i| i != Data::Nil } item.map! { |i| clean_parameters(i) } when Hash item = item.dup item.delete_if{ |_,v| v == Data::Nil } item.keys.each do |k| item[k] = clean_parameters(item[k]) end end item end end end vagrant_cloud-3.1.3/lib/vagrant_cloud/data.rb000066400000000000000000000176761477154370500212400ustar00rootroot00000000000000module VagrantCloud # Generic data class which provides simple attribute # data storage using a Hash like interface class Data # Custom nil class which is used for signifying # a nil value that was not set by the user. This # makes it easy to filter out values which are # unset vs. those that are set to nil. class NilClass < BasicObject include ::Singleton def nil?; true; end def ==(v); v.nil? 
|| super(v); end def ===(v); equal?(v); end def equal?(v); v.nil? || super(v); end def to_i; 0; end def to_f; 0.0; end def to_a; []; end def to_h; {}; end def to_s; ""; end def &(_); false; end def |(_); false; end def ^(_); false; end def !; true; end def inspect; 'nil'; end end # Easy to use constant to access general # use instance of our custom nil class Nil = NilClass.instance # Create a new instance # # @return [Data] def initialize(**opts) @data = opts end # Fetch value from data # # @param [String, Symbol] k Name of value # @return [Object] def [](k) @data.key?(k.to_sym) ? @data[k.to_sym] : Nil end # @return [String] def inspect "<#{self.class.name}:#{sprintf("%#x", object_id)}>" end protected def data; @data; end # Immutable data class. This class adds extra functionality to the Data # class like providing attribute methods which can be defined using the # `attr_required` and `attr_optional` methods. Once an instance is created # the data is immutable. For example: # # class MyData < Immutable # attr_required :name # attr_optional :version # end # # When creating a new instance, a name parameter _must_ be provided, # but a version parameter is optional, so both are valid: # # instance = MyData.new(name: "testing", version: "new-version") # # and # # instance = MyData.new(name: "testing") # # but only providing the version is invalid: # # instance = MyData.new(version: "new-version") # -> Exception class Immutable < Data @@lock = Mutex.new # Define attributes which are required def self.attr_required(*args) return @required || [] if args.empty? sync do @required ||= [] if !args.empty? # Create any accessor methods which do not yet exist args = args.map(&:to_sym) - @required args.each do |argument_name| if !method_defined?(argument_name) define_method(argument_name) { send(:[], argument_name.to_sym) } end end @required += args end @required end end # Define attributes which are optional def self.attr_optional(*args) return @optional || [] if args.empty? 
sync do @optional ||= [] if !args.empty? # Create any accessor method which do not yet exist args = args.map(&:to_sym) - @optional args.each do |argument_name| if !method_defined?(argument_name) define_method(argument_name) { send(:[], argument_name.to_sym) } end end @optional += args end @optional end end # If inherited, set attribute information def self.inherited(klass) klass.attr_required(*attr_required) klass.attr_optional(*attr_optional) klass.class_variable_set(:@@lock, Mutex.new) end # Synchronize action def self.sync @@lock.synchronize do yield end end # Create a new instance # # @return [Immutable] def initialize(**opts) super() self.class.attr_required.each do |attr| if !opts.key?(attr) raise ArgumentError, "Missing required parameter `#{attr}`" end data[attr.to_sym] = opts[attr].dup end self.class.attr_optional.each do |attr| if opts.key?(attr) data[attr.to_sym] = opts[attr].dup end end extras = opts.keys - (self.class.attr_required + self.class.attr_optional) if !extras.empty? raise ArgumentError, "Unknown parameters provided: #{extras.join(",")}" end freezer(@data) end # @return [String] def inspect vars = (self.class.attr_required + self.class.attr_optional).map do |k| val = self.send(:[], k) next if val.nil? || val.to_s.empty? 
"#{k}=#{val.inspect}" end.compact.join(", ") "<#{self.class.name}:#{sprintf("%#x", object_id)} #{vars}>" end protected # Freeze the given object and all nested # objects that can be found # # @return [Object] def freezer(obj) if obj.is_a?(Enumerable) obj.each do |item| freezer(item) item.freeze end end obj.freeze end end # Mutable data class class Mutable < Immutable # Define attributes which are mutable def self.attr_mutable(*args) sync do args.each do |attr| if !attr_required.include?(attr.to_sym) && !attr_optional.include?(attr.to_sym) raise ArgumentError, "Unknown attribute name provided `#{attr}`" end define_method("#{attr}=") { |v| dirty[attr.to_sym] = v } end end end # Load data and create a new instance # # @param [Hash] options Value to initialize instance # @return [Mutable] def self.load(options={}) opts = {}.tap do |o| (attr_required + attr_optional + self.instance_method(:initialize).parameters.find_all { |i| i.first == :key || i.first == :keyreq }.map(&:last)).each do |k| o[k.to_sym] = options[k.to_sym] end end self.new(**opts) end # Create a new instance # # @return [Mutable] def initialize(**opts) super @dirty = {} end # Fetch value from data # # @param [String, Symbol] k Name of value # @return [Object] def [](k) if dirty?(k) @dirty[k.to_sym] else super end end # Check if instance is dirty or specific # attribute if key is provided # # @param [Symbol] key Key to check # @return [Boolean] instance is dirty def dirty?(key=nil, **opts) if key.nil? !@dirty.empty? else @dirty.key?(key.to_sym) end end # Load given data and ignore any fields # that are provided. Flush dirty state. 
# # @param [Hash] data Attribute data to load # @param [Array] ignores Fields to skip # @param [Array] only Fields to update # @return [self] def clean(data:, ignores: [], only: []) raise TypeError, "Expected type `Hash` but received `#{data.inspect}`" if !data.is_a?(Hash) new_data = @data.dup ignores = Array(ignores).map(&:to_sym) only = Array(only).map(&:to_sym) data.each do |k, v| k = k.to_sym next if ignores.include?(k) next if !only.empty? && !only.include?(k) if self.respond_to?(k) new_data[k] = v @dirty.delete(k) end end @data = freezer(new_data) self end # Merge values from dirty cache into data # # @return [self] def clean! @data = freezer(@data.merge(@dirty)) @dirty.clear self end # @return [self] disable freezing def freeze self end # @return [Hash] updated attributes protected def dirty; @dirty; end end end Nil = Data::Nil end vagrant_cloud-3.1.3/lib/vagrant_cloud/error.rb000066400000000000000000000032031477154370500214350ustar00rootroot00000000000000module VagrantCloud class Error < StandardError class ClientError < Error class RequestError < ClientError attr_accessor :error_code attr_accessor :error_arr def initialize(msg, http_body, http_code) message = msg begin errors = JSON.parse(http_body) if errors.is_a?(Hash) vagrant_cloud_msg = errors['errors'] if vagrant_cloud_msg.is_a?(Array) message = msg + ' - ' + vagrant_cloud_msg.map(&:to_s).join(', ').to_s elsif !vagrant_cloud_msg.to_s.empty? 
message = msg + ' - ' + vagrant_cloud_msg.to_s end end rescue JSON::ParserError => err vagrant_cloud_msg = err.message end @error_arr = Array(vagrant_cloud_msg) @error_code = http_code.to_i super(message) end end class ConnectionLockedError < ClientError; end class AuthenticationError < ClientError def initialize(msg, http_code) @error_arr = [msg] @error_code = http_code.to_i super(msg) end end end class BoxError < Error class InvalidVersionError < BoxError def initialize(version_number) message = 'Invalid version given: ' + version_number super(message) end end class BoxExistsError < BoxError; end class ProviderNotFoundError < BoxError; end class VersionExistsError < BoxError; end class VersionStatusChangeError < BoxError; end class VersionProviderExistsError < BoxError; end end end end vagrant_cloud-3.1.3/lib/vagrant_cloud/instrumentor.rb000066400000000000000000000003461477154370500230620ustar00rootroot00000000000000module VagrantCloud module Instrumentor autoload :Collection, "vagrant_cloud/instrumentor/collection" autoload :Core, "vagrant_cloud/instrumentor/core" autoload :Logger, "vagrant_cloud/instrumentor/logger" end end vagrant_cloud-3.1.3/lib/vagrant_cloud/instrumentor/000077500000000000000000000000001477154370500225325ustar00rootroot00000000000000vagrant_cloud-3.1.3/lib/vagrant_cloud/instrumentor/collection.rb000066400000000000000000000067411477154370500252220ustar00rootroot00000000000000module VagrantCloud module Instrumentor class Collection < Core # @return [Set] attr_reader :instrumentors # @return [Set] attr_reader :subscriptions # Create a new instance # # @param [Array] instrumentors Instrumentors to add to collection def initialize(instrumentors: []) @lock = Mutex.new @subscriptions = Set.new @instrumentors = Set.new # Add our default @instrumentors << Logger.new Array(instrumentors).each do |i| if !i.is_a?(Core) && !i.respond_to?(:instrument) raise TypeError, "Instrumentors must implement `#instrument`" end @instrumentors << i end 
@instrumentors.freeze end # Add a new instrumentor # # @param [Core] instrumentor New instrumentor to add # @return [self] def add(instrumentor) @lock.synchronize do if !instrumentor.is_a?(Core) && !instrumentor.respond_to?(:instrument) raise TypeError, "Instrumentors must implement `#instrument`" end @instrumentors = (instrumentors + [instrumentor]).freeze end self end # Remove instrumentor # # @param [Core] instrumentor Remove instrumentor from collection # @return [self] def remove(instrumentor) @lock.synchronize do @instrumentors = instrumentors.dup.tap{|i| i.delete(instrumentor)}.freeze end self end # Add a subscription for events # # @param [Regexp, String] event Event to match def subscribe(event, callable=nil, &block) if callable && block raise ArgumentError, "Callable argument or block expected, not both" end c = callable || block if !c.respond_to?(:call) raise TypeError, "Callable action is required for subscription" end entry = [event, c] @lock.synchronize do @subscriptions = (@subscriptions + [entry]).freeze end self end def unsubscribe(callable) @lock.synchronize do subscriptions = @subscriptions.dup subscriptions.delete_if { |entry| entry.last == callable } @subscriptions = subscriptions.freeze end self end # Call all instrumentors in collection with given parameters def instrument(name, params = {}) # Log the start time timing = {start_time: Time.now} # Run the action result = yield if block_given? 
# Log the completion time and calculate duration timing[:complete_time] = Time.now timing[:duration] = timing[:complete_time] - timing[:start_time] # Insert timing into params params[:timing] = timing # Call any instrumentors we know about @lock.synchronize do # Call our instrumentors first instrumentors.each do |i| i.instrument(name, params) end # Now call any matching subscriptions subscriptions.each do |event, callable| if event.is_a?(Regexp) next if !event.match(name) else next if event != name end args = [name, params] if callable.arity > -1 args = args[0, callable.arity] end callable.call(*args) end end result end end end end vagrant_cloud-3.1.3/lib/vagrant_cloud/instrumentor/core.rb000066400000000000000000000002201477154370500240010ustar00rootroot00000000000000module VagrantCloud module Instrumentor class Core def instrument(*_) raise NotImplementedError end end end end vagrant_cloud-3.1.3/lib/vagrant_cloud/instrumentor/logger.rb000066400000000000000000000060331477154370500243400ustar00rootroot00000000000000module VagrantCloud module Instrumentor class Logger < Core REDACTED = "REDACTED".freeze include VagrantCloud::Logger # Perform event logging # # @param [String] name Name of event "namespace.event" # @param [Hash] params Data available with event def instrument(name, params = {}) namespace, event = name.split(".", 2) if event == "error" logger.error { "#{namespace} #{event.upcase} #{params[:error]}" } return end logger.info do case namespace when "excon" # Make a copy so we can modify params = params.dup info = excon(event, params) else info = params.dup end "#{namespace} #{event.upcase} #{format_output(info)}" end logger.debug do "#{namespace} #{event.upcase} #{format_output(params)}" end end # Format output to make it look nicer # # @param [Hash] info Output information # @return [String] def format_output(info) info.map do |key, value| if value.is_a?(Enumerable) value = value.map{ |k,v| [k, v].compact.join(": ") }.join(", ") end 
"#{key}=#{value.inspect}" end.join(" ") end # Generate information based on excon event # # @param [String] event Event name # @param [Hash] params Event data # @return [Hash] data to be printed def excon(event, params) # Remove noisy stuff that may be present from excon params.delete(:connection) params.delete(:stack) # Remove any credential information params[:password] = REDACTED if params.key?(:password) params[:access_token] = REDACTED if params[:access_token] if params.dig(:headers, "Authorization") || params.dig(:headers, "Proxy-Authorization") params[:headers] = params[:headers].dup.tap do |h| h["Authorization"] = REDACTED if h["Authorization"] h["Proxy-Authorization"] = REDACTED if h["Proxy-Authorization"] end end if params.dig(:proxy, :password) params[:proxy] = params[:proxy].dup.tap do |proxy| proxy[:password] = REDACTED end end info = {} case event when "request", "retry" info[:method] = params[:method] info[:identifier] = params.dig(:headers, 'X-Request-Id') info[:url] = "#{params[:scheme]}://#{File.join(params[:host], params[:path])}" info[:query] = params[:query] if params[:query] info[:headers] = params[:headers] if params[:headers] when "response" info[:status] = params[:status] info[:identifier] = params.dig(:headers, 'X-Request-Id') info[:body] = params[:body] else info = params.dup end duration = (params.dig(:timing, :duration).to_f * 1000).to_i info[:duration] = "#{duration}ms" info end end end end vagrant_cloud-3.1.3/lib/vagrant_cloud/logger.rb000066400000000000000000000037331477154370500215730ustar00rootroot00000000000000module VagrantCloud module Logger @@lock = Mutex.new # @return [Log4r::Logger] default logger def self.default @@lock.synchronize do if !@logger # Require Log4r and define the levels we'll be using require 'log4r/config' Log4r.define_levels(*Log4r::Log4rConfig::LogLevels) level = nil begin level = Log4r.const_get(ENV.fetch("VAGRANT_CLOUD_LOG", "FATAL").upcase) rescue NameError # This means that the logging constant wasn't 
found, # which is fine. We just keep `level` as `nil`. But # we tell the user. level = nil end # Some constants, such as "true" resolve to booleans, so the # above error checking doesn't catch it. This will check to make # sure that the log level is an integer, as Log4r requires. level = nil if !level.is_a?(Integer) # Only override the log output format if the default is set if Log4r::Outputter.stderr.formatter.is_a?(Log4r::DefaultFormatter) base_formatter = Log4r::PatternFormatter.new( pattern: "%d [%5l] %m", date_pattern: "%F %T" ) Log4r::Outputter.stderr.formatter = base_formatter end logger = Log4r::Logger.new("vagrantcloud") logger.outputters = Log4r::Outputter.stderr logger.level = level @logger = logger end end @logger end def self.included(klass) klass.class_variable_set(:@@logger, Log4r::Logger.new(klass.name.downcase)) klass.class_eval { define_method(:logger) { self.class.class_variable_get(:@@logger) } } end # @return [Log4r::Logger] logger instance for current context def logger @@lock.synchronize do if !@logger @logger = Log4r::Logger.new(self.class.name.downcase) end @logger end end end Logger.default end vagrant_cloud-3.1.3/lib/vagrant_cloud/organization.rb000066400000000000000000000026001477154370500230100ustar00rootroot00000000000000module VagrantCloud class Organization < Data::Mutable attr_reader :account attr_required :username attr_optional :boxes, :avatar_url, :profile_html, :profile_markdown attr_mutable :boxes def initialize(account:, **opts) @account = account opts[:boxes] ||= [] super(**opts) bxs = boxes.map do |b| if !b.is_a?(Box) b = Box.load(organization: self, **b) end b end clean(data: {boxes: bxs}) end # Add a new box to the organization # # @param [String] name Name of the box # @return [Box] def add_box(name) if boxes.any? 
{ |b| b.name == name } raise Error::BoxError::BoxExistsError, "Box with name #{name} already exists" end b = Box.new(organization: self, name: name) clean(data: {boxes: boxes + [b]}) b end # Check if this instance is dirty # # @param [Boolean] deep Check nested instances # @return [Boolean] instance is dirty def dirty?(key=nil, deep: false) if key super(key) else d = super() if deep && !d d = boxes.any? { |b| b.dirty?(deep: true) } end d end end # Save the organization # # @return [self] # @note This only saves boxes within organization def save boxes.map(&:save) self end end end vagrant_cloud-3.1.3/lib/vagrant_cloud/response.rb000066400000000000000000000003671477154370500221520ustar00rootroot00000000000000module VagrantCloud class Response < Data::Immutable autoload :CreateToken, "vagrant_cloud/response/create_token" autoload :Request2FA, "vagrant_cloud/response/request_2fa" autoload :Search, "vagrant_cloud/response/search" end end vagrant_cloud-3.1.3/lib/vagrant_cloud/response/000077500000000000000000000000001477154370500216175ustar00rootroot00000000000000vagrant_cloud-3.1.3/lib/vagrant_cloud/response/create_token.rb000066400000000000000000000002331477154370500246050ustar00rootroot00000000000000module VagrantCloud class Response class CreateToken < Response attr_required :token, :token_hash, :created_at, :description end end end vagrant_cloud-3.1.3/lib/vagrant_cloud/response/request_2fa.rb000066400000000000000000000001701477154370500243620ustar00rootroot00000000000000module VagrantCloud class Response class Request2FA < Response attr_required :destination end end end vagrant_cloud-3.1.3/lib/vagrant_cloud/response/search.rb000066400000000000000000000031761477154370500234200ustar00rootroot00000000000000module VagrantCloud class Response class Search < Response # @return [Account] attr_reader :account # @return [Hash] search parameters attr_reader :search_parameters attr_optional :boxes def initialize(account:, params:, **opts) if !account.is_a?(Account) raise 
TypeError, "Expected type `#{Account.name}` but received `#{account.class.name}`" end @account = account @search_parameters = params opts[:boxes] = reload_boxes(opts[:boxes]) super(**opts) end # @return [Integer] def page pg = @search_parameters.fetch(:page, 0).to_i pg > 0 ? pg : 1 end # @return [Search] previous page of search results def previous if page <= 1 raise ArgumentError, "Cannot request page results less than one" end account.searcher.from_response(self) do |s| s.prev_page end end # @return [Search] next page of search results def next account.searcher.from_response(self) do |s| s.next_page end end protected # Load all the box data into proper instances def reload_boxes(boxes) org_cache = {} boxes.map do |b| org_name = b[:username] if !org_cache[org_name] org_cache[org_name] = account.organization(name: org_name) end org = org_cache[org_name] box = Box.new(organization: org, **b) org.boxes = org.boxes + [box] org.clean! box end end end end end vagrant_cloud-3.1.3/lib/vagrant_cloud/search.rb000066400000000000000000000067431477154370500215650ustar00rootroot00000000000000module VagrantCloud class Search # @return [Account] attr_reader :account # Create a new search instance # # @param [String] access_token Authentication token # @param [Account] account Account instance # @param [Client] client Client instance # @return [Search] def initialize(access_token: nil, account: nil, client: nil) args = {access_token: access_token, account: account, client: client}.compact if args.size > 1 raise ArgumentError, "Search accepts `access_token`, `account`, or `client` but received multiple (#{args.keys.join(", ")})" end if client if !client.is_a?(Client) raise TypeError, "Expecting type `#{Client.name}` but received `#{client.class.name}`" end @account = Account.new(client: client) elsif account if !account.is_a?(Account) raise TypeError, "Expecting type `#{Account.name}` but received `#{account.class.name}`" end @account = account else @account = 
Account.new(access_token: access_token) end @params = {} @lock = Mutex.new end # Requests a search based on the given parameters # # @param [String] query # @param [String] provider # @param [String] sort # @param [String] order # @param [String] limit # @param [String] page # @return [Response::Search] def search(query: Data::Nil, architecture: Data::Nil, provider: Data::Nil, sort: Data::Nil, order: Data::Nil, limit: Data::Nil, page: Data::Nil) @lock.synchronize do @params = { query: query, architecture: architecture, provider: provider, sort: sort, order: order, limit: limit, page: page } execute end end # Request the next page of the search results # # @param [Response::Search] def next_page @lock.synchronize do if @params.empty? raise ArgumentError, "No active search currently cached" end page = @params[:page].to_i page = 1 if page < 1 @params[:page] = page + 1 execute end end # Request the previous page of the search results # # @param [Response::Search] def prev_page @lock.synchronize do if @params.empty? raise ArgumentError, "No active search currently cached" end page = @params[:page].to_i - 1 @params[:page] = page < 1 ? 1 : page execute end end # @return [Boolean] Search terms are stored def active? !@params.empty? end # Clear the currently cached search parameters # # @return [self] def clear! 
@lock.synchronize { @params.clear } self end # Seed the parameters # # @return [self] def seed(**params) @lock.synchronize { @params = params } self end # Generate a new instance seeded with search # parameters from given response # # @param [Response::Search] response Search response # @yieldparam [Search] Seeded search instance # @return [Object] result of given block def from_response(response) s = self.class.new(account: account) yield s.seed(**response.search_parameters) end protected # @return [Response::Search] def execute r = account.client.search(**@params) Response::Search.new(account: account, params: @params, **r) end end end vagrant_cloud-3.1.3/lib/vagrant_cloud/version.rb000066400000000000000000000001641477154370500217740ustar00rootroot00000000000000module VagrantCloud VERSION = Gem::Version.new(File.read(File.expand_path("../../../version.txt", __FILE__))) end vagrant_cloud-3.1.3/spec/000077500000000000000000000000001477154370500153155ustar00rootroot00000000000000vagrant_cloud-3.1.3/spec/spec_helper.rb000066400000000000000000000003211477154370500201270ustar00rootroot00000000000000require 'webmock/rspec' RSpec.configure do |config| config.expect_with :rspec do |c| c.syntax = :expect end end WebMock.disable_net_connect!(allow_localhost: true) ENV.delete("VAGRANT_CLOUD_TOKEN") vagrant_cloud-3.1.3/spec/unit/000077500000000000000000000000001477154370500162745ustar00rootroot00000000000000vagrant_cloud-3.1.3/spec/unit/vagrant_cloud/000077500000000000000000000000001477154370500211245ustar00rootroot00000000000000vagrant_cloud-3.1.3/spec/unit/vagrant_cloud/account_spec.rb000066400000000000000000000162661477154370500241320ustar00rootroot00000000000000require 'spec_helper' require 'vagrant_cloud' describe VagrantCloud::Account do let(:access_token) { double("access_token") } let(:client) { double("client", access_token: access_token) } let(:username) { double("username") } let(:subject) { described_class.new(access_token: access_token) } before do 
allow(VagrantCloud::Client).to receive(:new).with(hash_including(access_token: access_token)).and_return(client) allow(client).to receive(:authentication_token_validate). and_return(user: {username: username}) end describe "#initialize" do it "should support a custom server" do expect(VagrantCloud::Client).to receive(:new).with(hash_including(url_base: "example.com")) described_class.new(access_token: access_token, custom_server: "example.com") end it "should support retry count" do expect(VagrantCloud::Client).to receive(:new).with(hash_including(retry_count: 1)) described_class.new(access_token: access_token, retry_count: 1) end it "should support retry interval" do expect(VagrantCloud::Client).to receive(:new).with(hash_including(retry_interval: 1)) described_class.new(access_token: access_token, retry_interval: 1) end it "should support custom instrumentor" do i = double("instrumentor") expect(VagrantCloud::Client).to receive(:new).with(hash_including(instrumentor: i)) described_class.new(access_token: access_token, instrumentor: i) end it "should set the username during initialization" do expect(subject.username).to eq(username) end end describe "#searcher" do it "should create a new Searcher instance" do expect(subject.searcher).to be_a(VagrantCloud::Search) end it "should be attached to the account" do expect(subject.searcher.account).to eq(subject) end end describe "#create_token" do let(:password) { double("password") } let(:response) { {token: token, token_hash: token_hash, created_at: created_at, description: description} } let(:token) { "TOKEN" } let(:token_hash) { "TOKEN_HASH" } let(:created_at) { "CREATED_AT" } let(:description) { "DESCRIPTION" } before { allow(client).to receive(:authentication_token_create).and_return(response) } it "should require a password" do expect { subject.create_token }.to raise_error(ArgumentError) end it "should return a create token response" do expect(subject.create_token(password: password)). 
to be_an_instance_of(VagrantCloud::Response::CreateToken) end it "should send username and password" do expect(client).to receive(:authentication_token_create).with(hash_including(username: username, password: password)). and_return(response) subject.create_token(password: password) end it "should send description and two factor code if provided" do expect(client).to receive(:authentication_token_create).with(hash_including(description: description, code: "CODE")). and_return(response) subject.create_token(password: password, description: description, code: "CODE") end end describe "#delete_token" do it "should send DELETE request to authenticate" do expect(client).to receive(:authentication_token_delete) subject.delete_token end it "should return itself" do allow(client).to receive(:authentication_token_delete) expect(subject.delete_token).to eq(subject) end end describe "#validate_token" do it "should call authenticate" do expect(client).to receive(:authentication_token_validate) subject.validate_token end it "should return self" do allow(client).to receive(:request) expect(subject.validate_token).to eq(subject) end end describe "#request_2fa_code" do let(:delivery_method) { double("delivery_method") } let(:password) { double("password") } let(:response) { {two_factor: {obfuscated_destination: "2fa-dst"}} } before { allow(client).to receive(:authentication_request_2fa_code).and_return(response) } it "should require delivery method" do expect { subject.request_2fa_code(password: password)}. to raise_error(ArgumentError) end it "should require password" do expect { subject.request_2fa_code(delivery_method: delivery_method) }. to raise_error(ArgumentError) end it "should return a 2FA request response" do expect(subject.request_2fa_code(delivery_method: delivery_method, password: password)). 
to be_an_instance_of(VagrantCloud::Response::Request2FA) end it "should include 2FA request information" do expect(client).to receive(:authentication_request_2fa_code).with(hash_including(username: username, password: password, delivery_method: delivery_method)). and_return(response) subject.request_2fa_code(delivery_method: delivery_method, password: password) end it "should make a post request to the request code path" do expect(client).to receive(:authentication_request_2fa_code).with(hash_including(username: username, password: password, delivery_method: delivery_method)). and_return(response) subject.request_2fa_code(delivery_method: delivery_method, password: password) end end describe "#organization" do let(:response) { {username: r_username} } let(:r_username) { "R_USERNAME" } let(:username) { "username" } before { allow(client).to receive(:organization_get).and_return(response) } it "should request account username organization by default" do expect(client).to receive(:organization_get).with(name: username). and_return(response) subject.organization end it "should request organization with given name" do expect(client).to receive(:organization_get).with(name: r_username). and_return(response) subject.organization(name: r_username) end it "should return an organization instance" do expect(subject.organization).to be_an_instance_of(VagrantCloud::Organization) end it "should set the account into the organization instance" do expect(subject.organization.account).to eq(subject) end end describe "#setup!" do let(:response) { {user: {username: different_username}} } let(:different_username) { double("different_username") } before { allow(client).to receive(:authentication_token_validate). and_return(response) } it "should make a request to authenticate" do expect(client).to receive(:authentication_token_validate).and_return(response) subject.send(:setup!) 
end it "should extract the username" do expect(subject.send(:setup!)).to eq(different_username) end context "when client is built without access token" do let(:c) { double("empty_client", access_token: nil) } let(:instance) { described_class.new(access_token: nil) } before do subject allow(VagrantCloud::Client).to receive(:new).with(hash_including(access_token: nil)). and_return(c) end it "should not fetch the token username" do expect(c).not_to receive(:authentication_token_validate) expect(instance.send(:setup!)).to be_nil end end end end vagrant_cloud-3.1.3/spec/unit/vagrant_cloud/auth_spec.rb000066400000000000000000000131431477154370500234260ustar00rootroot00000000000000require 'spec_helper' require 'vagrant_cloud' describe VagrantCloud::Auth do let(:client_id) { nil } let(:client_secret) { nil } let(:auth_url) { nil } let(:auth_path) { nil } let(:token_path) { nil } # Remove any environment variables that # maybe set that are used by auth before do allow(ENV).to receive(:[]).and_call_original allow(ENV).to receive(:[]).with("HCP_CLIENT_ID").and_return(client_id) allow(ENV).to receive(:[]).with("HCP_CLIENT_SECRET").and_return(client_secret) allow(ENV).to receive(:fetch) do |name, default_value| send(name.sub("HCP_", "").downcase) || default_value end end describe "#initialize" do context "with no arguments" do describe "#token" do it "should return nil" do expect(subject.token).to be_nil end end describe "#available?" do it "should return false" do expect(subject).not_to be_available end end end context "with access token provided" do let(:token) { "test-access-token" } subject { described_class.new(access_token: token) } describe "#token" do it "should return the access token" do expect(subject.token).to eq(token) end end describe "#available?" 
do it "should return true" do expect(subject).to be_available end end end context "with HCP_CLIENT_ID only set" do let(:client_id) { "test-client-id" } describe "#initialize" do it "should raise an argument error" do expect { subject }.to raise_error(ArgumentError) end end end context "with HCP_CLIENT_SECRET only set" do let(:client_secret) { "test-client-secret" } describe "#initialize" do it "should raise an argument error" do expect { subject }.to raise_error(ArgumentError) end end end context "with HCP_CLIENT_ID and HCP_CLIENT_SECRET set" do let(:client_secret) { "test-client-secret" } let(:client_id) { "test-client-id" } let(:token) { "test-access-token" } let(:expires_at) { Time.now.to_i + 10 } let(:token_response) { double(:token_response, token: token, expires_at: expires_at) } let(:client) { double(:client, client_credentials: client_credentials) } let(:client_credentials) { double(:client_credentials, get_token: token_response) } let(:retry_token) { "retry-test-access-token" } let(:retry_expires_at) { Time.now.to_i + 10 } let(:retry_token_response) { double(:token_response, token: retry_token, expires_at: retry_expires_at) } let(:retry_client_credentials) { double(:client_credentials, get_token: retry_token_response) } before do allow(OAuth2::Client).to receive(:new).and_return(client) end describe "#token" do it "should return the access token" do expect(subject.token).to eq(token) end context "with expired token" do let(:expires_at) { Time.now.to_i - 5 } before do expect(client).to receive(:client_credentials).and_return(client_credentials) expect(client).to receive(:client_credentials).and_return(retry_client_credentials) end it "should return the updated access token" do subject.token # to seed the internal value expect(subject.token).to eq(retry_token) end end it "should properly configure the oauth2 client" do expect(OAuth2::Client).to receive(:new).with(client_id, client_secret, hash_including( site: described_class.const_get(:DEFAULT_AUTH_URL), 
authorize_url: described_class.const_get(:DEFAULT_AUTH_PATH), token_url: described_class.const_get(:DEFAULT_TOKEN_PATH), )).and_return(client) expect(subject.token).to eq(token) end context "with HCP_AUTH_URL set" do let(:auth_url) { "https://example.com" } it "should properly configure the oauth2 client" do expect(OAuth2::Client).to receive(:new).with(client_id, client_secret, hash_including( site: auth_url, authorize_url: described_class.const_get(:DEFAULT_AUTH_PATH), token_url: described_class.const_get(:DEFAULT_TOKEN_PATH), )).and_return(client) expect(subject.token).to eq(token) end end context "with HCP_AUTH_PATH set" do let(:auth_path) { "/auth/custom" } it "should properly configure the oauth2 client" do expect(OAuth2::Client).to receive(:new).with(client_id, client_secret, hash_including( site: described_class.const_get(:DEFAULT_AUTH_URL), authorize_url: auth_path, token_url: described_class.const_get(:DEFAULT_TOKEN_PATH), )).and_return(client) expect(subject.token).to eq(token) end end context "with HCP_TOKEN_PATH set" do let(:token_path) { "/token/custom" } it "should properly configure the oauth2 client" do expect(OAuth2::Client).to receive(:new).with(client_id, client_secret, hash_including( site: described_class.const_get(:DEFAULT_AUTH_URL), authorize_url: described_class.const_get(:DEFAULT_AUTH_PATH), token_url: token_path, )).and_return(client) expect(subject.token).to eq(token) end end end describe "#available?" 
do it "should return true" do expect(subject).to be_available end end end end end vagrant_cloud-3.1.3/spec/unit/vagrant_cloud/box/000077500000000000000000000000001477154370500217145ustar00rootroot00000000000000vagrant_cloud-3.1.3/spec/unit/vagrant_cloud/box/provider_spec.rb000066400000000000000000000460111477154370500251070ustar00rootroot00000000000000require 'spec_helper' require 'vagrant_cloud' describe VagrantCloud::Box::Provider do let(:version) { double("version") } let(:provider_name) { "PROVIDER_NAME" } let(:box_username) { double("box_username") } let(:box_name) { double("box_name") } let(:version_version) { double("version_version") } let(:box_url) { double("box_url") } let(:architecture) { "BOX_ARCHITECTURE" } let(:subject) { described_class.new( version: version, name: provider_name, architecture: architecture ) } before do allow(version).to receive(:is_a?).with(VagrantCloud::Box::Version).and_return(true) allow(version).to receive_message_chain(:box, :username).and_return(box_username) allow(version).to receive_message_chain(:box, :name).and_return(box_name) allow(version).to receive(:version).and_return(version_version) end describe "#initialize" do it "should require a version" do expect { described_class.new(name: provider_name) }.to raise_error(ArgumentError) end it "should require a name" do expect { described_class.new(version: version) }.to raise_error(ArgumentError) end end describe "#delete" do context "when provdier does not exist" do before { allow(subject).to receive(:exist?).and_return(false) } it "should not request deletion" do expect(version).not_to receive(:box) subject.delete end it "should return nil" do expect(subject.delete).to be_nil end end context "when provider does exist" do before do allow(subject).to receive(:exist?).and_return(true) allow(version).to receive_message_chain(:providers, :dup).and_return([]) allow(version).to receive(:clean).with(data: {providers: []}) end it "should request deletion" do expect(version).to 
receive_message_chain(:box, :organization, :account, :client, :box_version_provider_delete) subject.delete end it "should send box username" do expect(version).to receive_message_chain(:box, :organization, :account, :client, :box_version_provider_delete). with(hash_including(username: box_username)) subject.delete end it "should send box name" do expect(version).to receive_message_chain(:box, :organization, :account, :client, :box_version_provider_delete). with(hash_including(name: box_name)) subject.delete end it "should send version number" do expect(version).to receive_message_chain(:box, :organization, :account, :client, :box_version_provider_delete). with(hash_including(version: version_version)) subject.delete end it "should send provider_name" do expect(version).to receive_message_chain(:box, :organization, :account, :client, :box_version_provider_delete). with(hash_including(provider: provider_name)) subject.delete end it "should send architecture" do expect(version).to receive_message_chain(:box, :organization, :account, :client, :box_version_provider_delete). 
with(hash_including(architecture: architecture)) subject.delete end it "should return nil" do expect(version).to receive_message_chain(:box, :organization, :account, :client, :box_version_provider_delete) expect(subject.delete).to be_nil end it "should remove itself from the versions provider collection" do expect(version).to receive_message_chain(:box, :organization, :account, :client, :box_version_provider_delete) subject.delete end end end describe "#upload" do let(:response) { {upload_path: upload_path} } let(:response_direct) { {upload_path: upload_path, callback: callback_url} } let(:upload_path) { double("upload_path") } let(:callback_url) { double("callback-url", to_str: callback) } let(:callback) { "callback_destination" } let(:callback_proc) { double("callback-proc", call: nil) } before do allow(version).to receive_message_chain(:box, :organization, :account, :client, :box_version_provider_upload). and_return(response) allow(version).to receive_message_chain(:box, :organization, :account, :client, :box_version_provider_upload_direct). and_return(response_direct) allow(version).to receive_message_chain(:box, :tag).and_return("org/box") allow(version).to receive(:version).and_return("1.0") end context "when provider does not exist" do before { allow(subject).to receive(:exist?).and_return(false) } it "should error" do expect { subject.upload }.to raise_error(VagrantCloud::Error::BoxError::ProviderNotFoundError) end context "when direct upload is enabled" do it "should error" do expect { subject.upload(direct: true) }. 
to raise_error(VagrantCloud::Error::BoxError::ProviderNotFoundError) end end end context "when provider exists" do before { allow(subject).to receive(:exist?).and_return(true) } it "should error if path and block are both provided" do expect { subject.upload(path: "/") {} }.to raise_error(ArgumentError) end it "should return the upload path" do expect(subject.upload).to eq(upload_path) end context "with path provided" do let(:path) { "PATH" } before { allow(File).to receive(:open).with(path, any_args) } context "when path does not exist" do before { allow(File).to receive(:exist?).with(path).and_return(false) } it "should error" do expect { subject.upload(path: path) }.to raise_error(Errno::ENOENT) end end context "when path does exist" do before { allow(File).to receive(:exist?).with(path).and_return(true) } it "should make request for upload" do expect(version).to receive_message_chain(:box, :organization, :account, :client, :box_version_provider_upload). and_return(response) subject.upload(path: path) end it "should upload the path" do expect(File).to receive(:open).with(path, any_args).and_yield(double("file")) expect(Excon).to receive(:put).with(upload_path, any_args) subject.upload(path: path) end it "should return self" do expect(subject.upload(path: path)).to eq(subject) end end end context "with block provided" do it "should make request for upload" do expect(version).to receive_message_chain(:box, :organization, :account, :client, :box_version_provider_upload). 
and_return(response) subject.upload { |url| } end it "should yield the upload path" do subject.upload do |url| expect(url).to eq(upload_path) end end it "should return result of block" do expect(subject.upload { |u| :test }).to eq(:test) end end end context "with direct option set" do before do allow(subject).to receive(:exist?).and_return(true) allow(Excon).to receive(:put) allow(Excon).to receive(:post) end it "should error if path and block are both provided" do expect { subject.upload(path: "/", direct: true) {} }.to raise_error(ArgumentError) end it "should return a DirectUpload" do expect(subject.upload(direct: true)).to be_a(VagrantCloud::Box::Provider::DirectUpload) end it "should include an upload_url and callback_url in result" do result = subject.upload(direct: true) expect(result.upload_url).to eq(upload_path) expect(result.callback_url).to eq(callback_url) expect(result.callback).to be_a(Proc) end context "with path provided" do let(:path) { "PATH" } before do allow(File).to receive(:open).with(path, any_args) allow(version).to receive_message_chain(:box, :organization, :account, :client, :request). with(method: :put, path: callback) end context "when path does not exist" do before { allow(File).to receive(:exist?).with(path).and_return(false) } it "should error" do expect { subject.upload(path: path, direct: true) }.to raise_error(Errno::ENOENT) end end context "when path does exist" do before { allow(File).to receive(:exist?).with(path).and_return(true) } it "should make request for direct upload" do expect(version).to receive_message_chain(:box, :organization, :account, :client, :box_version_provider_upload_direct). and_return(response_direct) subject.upload(path: path, direct: true) end it "should upload the path" do expect(File).to receive(:open).with(path, any_args).and_yield(double("file")) expect(version).to receive_message_chain(:box, :organization, :account, :client, :request). 
with(method: :put, path: callback) subject.upload(path: path, direct: true) end it "should request the callback" do expect(version).to receive_message_chain(:box, :organization, :account, :client, :request). with(method: :put, path: callback) subject.upload(path: path, direct: true) end it "should return self" do expect(subject.upload(path: path, direct: true)).to eq(subject) end end end context "with block provided" do before do allow(version).to receive_message_chain(:box, :organization, :account, :client, :request). with(method: :put, path: callback) end it "should make request for upload" do expect(version).to receive_message_chain(:box, :organization, :account, :client, :box_version_provider_upload_direct). and_return(response_direct) subject.upload(direct: true) { |du| } end it "should yield the upload path" do subject.upload(direct: true) do |du| expect(du).to eq(upload_path) end end it "should request the callback" do expect(version).to receive_message_chain(:box, :organization, :account, :client, :request). with(method: :put, path: callback) subject.upload(direct: true) {|_|} end it "should return result of block" do expect(subject.upload(direct: true) { |du| :test }).to eq(:test) end end end end describe "#exist?" do let(:subject) { described_class.new(version: version, name: provider_name, created_at: created_at) } context "with created_at attribute set" do let(:created_at) { Time.now.to_s } it "should be true" do expect(subject.exist?).to be_truthy end end context "with created_at attribute unset" do let(:created_at) { nil } it "should be false" do expect(subject.exist?).to be_falsey end end end describe "#dirty?" 
do context "when provider does not exist" do before { allow(subject).to receive(:exist?).and_return(false) } it "should be true" do expect(subject.dirty?).to be_truthy end end context "when provider does exist" do before { allow(subject).to receive(:exist?).and_return(true) } it "should be false" do expect(subject.dirty?).to be_falsey end context "with modified attribute" do before { subject.url = "test" } it "should be true" do expect(subject.dirty?).to be_truthy end end end end describe "#save" do before { allow(subject).to receive(:save_provider) } context "when provider is not dirty" do before { allow(subject).to receive(:dirty?).and_return(false) } it "should not save provider" do expect(subject).not_to receive(:save_provider) subject.save end it "should return self" do expect(subject.save).to eq(subject) end end context "when provider is dirty" do before { allow(subject).to receive(:dirty?).and_return(true) } it "should save the provider" do expect(subject).to receive(:save_provider) subject.save end it "should return self" do expect(subject.save).to eq(subject) end end end describe "#save_provider" do let(:checksum) { double("checksum") } let(:checksum_type) { double("checksum_type") } before do allow(version).to receive_message_chain(:box, :organization, :account, :client, :box_version_provider_create). and_return({}) allow(version).to receive_message_chain(:box, :organization, :account, :client, :box_version_provider_update). and_return({}) end context "when provider exists" do before { allow(subject).to receive(:exist?).and_return(true) } it "should request an update" do expect(version).to receive_message_chain(:box, :organization, :account, :client, :box_version_provider_update). 
and_return({}) subject.send(:save_provider) end it "should return self" do expect(subject.send(:save_provider)).to eq(subject) end it "should include box organization" do expect(version).to receive_message_chain(:box, :organization, :account, :client, :box_version_provider_update). with(hash_including(username: box_username)).and_return({}) subject.send(:save_provider) end it "should include box name" do expect(version).to receive_message_chain(:box, :organization, :account, :client, :box_version_provider_update). with(hash_including(name: box_name)).and_return({}) subject.send(:save_provider) end it "should include version" do expect(version).to receive_message_chain(:box, :organization, :account, :client, :box_version_provider_update). with(hash_including(version: version_version)).and_return({}) subject.send(:save_provider) end it "should include provider" do expect(version).to receive_message_chain(:box, :organization, :account, :client, :box_version_provider_update). with(hash_including(provider: provider_name)).and_return({}) subject.send(:save_provider) end it "should include checksum" do subject.checksum = checksum expect(version).to receive_message_chain(:box, :organization, :account, :client, :box_version_provider_update). with(hash_including(checksum: checksum)).and_return({}) subject.send(:save_provider) end it "should include checksum_type" do subject.checksum_type = checksum_type expect(version).to receive_message_chain(:box, :organization, :account, :client, :box_version_provider_update). with(hash_including(checksum_type: checksum_type)).and_return({}) subject.send(:save_provider) end it "should include architecture" do expect(version).to receive_message_chain(:box, :organization, :account, :client, :box_version_provider_update). 
with(hash_including(architecture: architecture)).and_return({}) subject.send(:save_provider) end it "should include new architecture" do expect(version).to receive_message_chain(:box, :organization, :account, :client, :box_version_provider_update). with(hash_including(new_architecture: architecture)).and_return({}) subject.send(:save_provider) end it "should include URL" do subject.url = box_url expect(version).to receive_message_chain(:box, :organization, :account, :client, :box_version_provider_update). with(hash_including(url: box_url)).and_return({}) subject.send(:save_provider) end context "when architecture is changed" do let(:new_architecture) { "NEW_BOX_ARCHITECTURE" } it "should include original and new architectures" do subject.architecture = new_architecture expect(version).to receive_message_chain(:box, :organization, :account, :client, :box_version_provider_update). with(hash_including(new_architecture: new_architecture, architecture: architecture)).and_return({}) subject.send(:save_provider) end end end context "when provider does not exist" do before { allow(subject).to receive(:exist?).and_return(false) } it "should request a creation" do expect(version).to receive_message_chain(:box, :organization, :account, :client, :box_version_provider_create). and_return({}) subject.send(:save_provider) end it "should return self" do expect(subject.send(:save_provider)).to eq(subject) end it "should include box organization" do expect(version).to receive_message_chain(:box, :organization, :account, :client, :box_version_provider_create). with(hash_including(username: box_username)).and_return({}) subject.send(:save_provider) end it "should include box name" do expect(version).to receive_message_chain(:box, :organization, :account, :client, :box_version_provider_create). 
with(hash_including(name: box_name)).and_return({}) subject.send(:save_provider) end it "should include version" do expect(version).to receive_message_chain(:box, :organization, :account, :client, :box_version_provider_create). with(hash_including(version: version_version)).and_return({}) subject.send(:save_provider) end it "should include provider" do expect(version).to receive_message_chain(:box, :organization, :account, :client, :box_version_provider_create). with(hash_including(provider: provider_name)).and_return({}) subject.send(:save_provider) end it "should include checksum" do subject.checksum = checksum expect(version).to receive_message_chain(:box, :organization, :account, :client, :box_version_provider_create). with(hash_including(checksum: checksum)).and_return({}) subject.send(:save_provider) end it "should include checksum_type" do subject.checksum_type = checksum_type expect(version).to receive_message_chain(:box, :organization, :account, :client, :box_version_provider_create). with(hash_including(checksum_type: checksum_type)).and_return({}) subject.send(:save_provider) end it "should include architecture" do expect(version).to receive_message_chain(:box, :organization, :account, :client, :box_version_provider_create). with(hash_including(architecture: architecture)).and_return({}) subject.send(:save_provider) end it "should include URL" do subject.url = box_url expect(version).to receive_message_chain(:box, :organization, :account, :client, :box_version_provider_create). 
with(hash_including(url: box_url)).and_return({}) subject.send(:save_provider) end end end end vagrant_cloud-3.1.3/spec/unit/vagrant_cloud/box/version_spec.rb000066400000000000000000000304541477154370500247460ustar00rootroot00000000000000require 'spec_helper' require 'vagrant_cloud' describe VagrantCloud::Box::Version do let(:box) { double("box", username: box_username, name: box_name, tag: "#{box_username}/#{box_name}") } let(:box_username) { double("box_username") } let(:box_name) { double("box_name") } let(:version) { "1.0.0" } let(:subject) { described_class.new(box: box, version: version) } before { allow(box).to receive(:is_a?).with(VagrantCloud::Box).and_return(true) } describe "#initialize" do it "should require a box" do expect { described_class.new }.to raise_error(ArgumentError) end it "should require box argument be box type" do expect { described_class.new(box: nil) }.to raise_error(TypeError) end it "should load providers" do instance = described_class.new(box: box, version: version, providers: [{name: "test"}]) expect(instance.providers).not_to be_empty expect(instance.providers.first).to be_a(VagrantCloud::Box::Provider) end end describe "#delete" do before do allow(box).to receive(:versions).and_return([]) allow(box).to receive_message_chain(:organization, :account, :client, :box_version_delete) end it "should not delete if version does not exist" do expect(box).not_to receive(:organization) subject.delete end it "should return nil" do expect(subject.delete).to be_nil end context "when version exists" do before do allow(subject).to receive(:exist?).and_return(true) allow(box).to receive(:clean) end it "should make a version deletion request" do expect(box).to receive_message_chain(:organization, :account, :client, :box_version_delete) subject.delete end it "should include box username and name" do expect(box).to receive_message_chain(:organization, :account, :client, :box_version_delete). 
with(hash_including(username: box_username, name: box_name)) subject.delete end it "should include the version" do expect(box).to receive_message_chain(:organization, :account, :client, :box_version_delete). with(hash_including(version: version)) subject.delete end it "should delete the version from the box versions" do versions = double("versions") expect(versions).to receive(:dup).and_return(versions) expect(box).to receive(:versions).and_return(versions) expect(versions).to receive(:delete).with(subject).and_return(versions) expect(box).to receive(:clean).with(data: {versions: versions}) subject.delete end end end describe "#release" do context "when version is released" do before { allow(subject).to receive(:released?).and_return(true) } it "should error" do expect { subject.release }.to raise_error(VagrantCloud::Error::BoxError::VersionStatusChangeError) end end context "when version has not been saved" do before { allow(subject).to receive(:exist?).and_return(false) } it "should error" do expect { subject.release }.to raise_error(VagrantCloud::Error::BoxError::VersionStatusChangeError) end end context "when version is saved and not released" do before do allow(subject).to receive(:exist?).and_return(true) allow(subject).to receive(:released?).and_return(false) end it "should send request to release version" do expect(box).to receive_message_chain(:organization, :account, :client, :box_version_release). and_return({}) subject.release end it "should include box username, box name, and version" do expect(box).to receive_message_chain(:organization, :account, :client, :box_version_release). with(hash_including(username: box_username, name: box_name, version: version)).and_return({}) subject.release end it "should update status with value provided in result" do expect(box).to receive_message_chain(:organization, :account, :client, :box_version_release). 
and_return({status: "active"}) subject.release expect(subject.status).to eq("active") end it "should return self" do expect(box).to receive_message_chain(:organization, :account, :client, :box_version_release). and_return({}) expect(subject.release).to eq(subject) end end end describe "#revoke" do context "when version is not released" do before { allow(subject).to receive(:released?).and_return(false) } it "should error" do expect { subject.revoke }.to raise_error(VagrantCloud::Error::BoxError::VersionStatusChangeError) end end context "when version is released" do before { allow(subject).to receive(:released?).and_return(true) } it "should send request to revoke release" do expect(box).to receive_message_chain(:organization, :account, :client, :box_version_revoke). and_return({}) subject.revoke end it "should include the box username, box name, and version" do expect(box).to receive_message_chain(:organization, :account, :client, :box_version_revoke). with(hash_including(username: box_username, name: box_name, version: version)).and_return({}) subject.revoke end it "should update status with value provided in result" do expect(box).to receive_message_chain(:organization, :account, :client, :box_version_revoke). and_return({status: "inactive"}) subject.revoke expect(subject.status).to eq("inactive") end it "should return self" do expect(box).to receive_message_chain(:organization, :account, :client, :box_version_revoke). and_return({}) expect(subject.revoke).to eq(subject) end end end describe "#add_provider" do it "should create a new provider" do expect(subject.add_provider("test")).to be_a(VagrantCloud::Box::Provider) end it "should add provider to providers collection" do pv = subject.add_provider("test") expect(subject.providers).to include(pv) end it "should raise error when provider exists" do subject.add_provider("test") expect { subject.add_provider("test") }. 
to raise_error(VagrantCloud::Error::BoxError::VersionProviderExistsError) end context "with architecture" do it "should add provider to collection and include architecture" do pv = subject.add_provider("test", "test-arch") expect(subject.providers).to include(pv) expect(pv.architecture).to eq("test-arch") end it "should add multiple same providers with different architectures" do ["arch1", "arch2", "arch3"].each do |arch| pv = subject.add_provider("test", arch) expect(subject.providers).to include(pv) expect(pv.architecture).to eq(arch) end end it "should raise error when provider exists" do subject.add_provider("test", "test-arch") expect { subject.add_provider("test", "test-arch") }. to raise_error(VagrantCloud::Error::BoxError::VersionProviderExistsError) end it "should raise error when adding existing provider without architecture" do subject.add_provider("test", "test-arch") expect { subject.add_provider("test") }. to raise_error(VagrantCloud::Error::BoxError::VersionProviderExistsError) end end end describe "#dirty?" do context "when version does not exist" do before { allow(subject).to receive(:exist?).and_return(false) } it "should be true" do expect(subject.dirty?).to be_truthy end end context "when version does exist" do before { allow(subject).to receive(:exist?).and_return(true) } it "should be false" do expect(subject.dirty?).to be_falsey end context "with modified attribute" do before { subject.description = "test" } it "should be true" do expect(subject.dirty?).to be_truthy end end context "with deep check" do it "should be false" do expect(subject.dirty?(deep: true)).to be_falsey end context "with modified attribute" do before { subject.description = "test" } it "should be true" do expect(subject.dirty?(deep: true)).to be_truthy end end context "with dirty provider in providers collection" do before { subject.add_provider("test") } it "should be true" do expect(subject.dirty?(deep: true)).to be_truthy end end end end end describe "#exist?" 
do let(:subject) { described_class.new(box: box, version: version, created_at: created_at) } context "with created_at attribute set" do let(:created_at) { Time.now.to_s } it "should be true" do expect(subject.exist?).to be_truthy end end context "with created_at attribute unset" do let(:created_at) { nil } it "should be false" do expect(subject.exist?).to be_falsey end end end describe "#save" do before do allow(subject).to receive(:save_version) allow(subject).to receive(:save_provdiers) end it "should return self" do expect(subject.save).to eq(subject) end context "when version is dirty" do before do allow(subject).to receive(:dirty?).and_return(true) allow(subject).to receive(:dirty?).with(deep: true).and_return(false) end it "should save the version" do expect(subject).to receive(:save_version) subject.save end end context "when version is clean" do before { allow(subject).to receive(:dirty?).and_return(false) } it "should not save the version" do expect(subject).not_to receive(:save_version) subject.save end end context "when dirty provider in providers collection" do before { subject.add_provider("test") } it "should save the providers" do expect(subject).to receive(:save_providers) subject.save end end end describe "#save_version" do context "when version exists" do before { allow(subject).to receive(:exist?).and_return(true) } it "should request a version update" do expect(box).to receive_message_chain(:organization, :account, :client, :box_version_update). and_return({}) subject.send(:save_version) end it "should include the box username, box name, version, and description" do expect(box).to receive_message_chain(:organization, :account, :client, :box_version_update). with(hash_including(username: box_username, name: box_name, version: version, description: subject.description)). and_return({}) subject.send(:save_version) end it "should return self" do expect(box).to receive_message_chain(:organization, :account, :client, :box_version_update). 
and_return({}) expect(subject.send(:save_version)).to eq(subject) end end context "when version does not exist" do before { allow(subject).to receive(:exist?).and_return(false) } it "should request a version create" do expect(box).to receive_message_chain(:organization, :account, :client, :box_version_create). and_return({}) subject.send(:save_version) end it "should include the box username, box name, version, and description" do expect(box).to receive_message_chain(:organization, :account, :client, :box_version_create). with(hash_including(username: box_username, name: box_name, version: version, description: subject.description)). and_return({}) subject.send(:save_version) end it "should return self" do expect(box).to receive_message_chain(:organization, :account, :client, :box_version_create). and_return({}) expect(subject.send(:save_version)).to eq(subject) end end end describe "#save_providers" do it "should return self" do expect(subject.send(:save_providers)).to eq(subject) end it "should save the providers" do pv = subject.add_provider("test") expect(pv).to receive(:save) subject.send(:save_providers) end end end vagrant_cloud-3.1.3/spec/unit/vagrant_cloud/box_spec.rb000066400000000000000000000241351477154370500232600ustar00rootroot00000000000000require 'spec_helper' require 'vagrant_cloud' describe VagrantCloud::Box do let(:organization) { VagrantCloud::Organization.new(account: account, username: organization_name) } let(:organization_name) { "ORG_NAME" } let(:account) { double("account") } let(:name) { "BOX_NAME" } let(:subject) { described_class.new(organization: organization, name: name) } before do allow(account).to receive_message_chain(:client, :box_get).and_return(versions: []) end describe "#initialize" do it "should require a name" do expect { described_class.new(organization: organization) }. to raise_error(ArgumentError) end it "should require an organization" do expect { described_class.new(name: name) }. 
to raise_error(ArgumentError) end it "should create new instance with organization and name" do expect { described_class.new(name: name, organization: organization) }. not_to raise_error end end describe "#short_description" do it "should be mutable" do expect(subject.short_description).to be_nil subject.short_description = "test" expect(subject.short_description).to eq("test") end end describe "#description" do it "should be mutable" do expect(subject.description).to be_nil subject.description = "test" expect(subject.description).to eq("test") end end describe "#private" do it "should be mutable" do expect(subject.private).to be_nil subject.private = true expect(subject.private).to be_truthy end end describe "#delete" do it "should return nil" do expect(subject.delete).to be_nil end it "should not request to delete box that does not exist" do expect(organization).not_to receive(:account) subject.delete end context "when box exists" do before { allow(subject).to receive(:exist?).and_return(true) } it "should request box deletion" do expect(account).to receive_message_chain(:client, :box_delete) subject.delete end end end describe "#add_version" do it "should create a new version" do expect(subject.add_version("1.0.0")).to be_a(VagrantCloud::Box::Version) end it "should add new version to the versions collection" do v = subject.add_version("1.0.0") expect(subject.versions).to include(v) end it "should error when adding an existing version" do subject.add_version("1.0.0") expect { subject.add_version("1.0.0") }. to raise_error(VagrantCloud::Error::BoxError::VersionExistsError) end end describe "#dirty?" 
do it "should be true when box does not exist" do expect(subject.exist?).to be_falsey expect(subject.dirty?).to be_truthy end context "when box exists" do before { allow(subject).to receive(:exist?).and_return(true) } it "should be false" do expect(subject.dirty?).to be_falsey end context "when attribute is modified" do before { subject.description = "test" } it "should be true" do expect(subject.dirty?).to be_truthy end it "should be true on attribute name check" do expect(subject.dirty?(:description)).to be_truthy end end context "deep check" do it "should be false" do expect(subject.dirty?(deep: true)).to be_falsey end context "when a version is added" do before { subject.add_version("1.0.0") } it "should be true" do expect(subject.dirty?(deep: true)).to be_truthy end end end end end describe "#exist?" do it "should be false when created_at is unset" do expect(subject.created_at).to be_falsey expect(subject.exist?).to be_falsey end context "when created_at is set" do before { subject.clean(data: {created_at: Time.now.to_s}) } it "should be true" do expect(subject.exist?).to be_truthy end end end describe "#versions_on_demand" do context "when box exists" do before { allow(subject).to receive(:exist?).and_return(true) } it "should load versions when called" do expect(account).to receive_message_chain(:client, :box_get).and_return(versions: []) subject.versions_on_demand end it "should not load versions after initial load" do expect(subject.dirty?(:versions)).to be_falsey expect(account).to receive_message_chain(:client, :box_get).and_return(versions: []) subject.versions_on_demand expect(account).not_to receive(:client) subject.versions_on_demand end end context "when box does not exist" do before { allow(subject).to receive(:exist?).and_return(false) } it "should not load versions when called" do expect(account).not_to receive(:client) subject.versions_on_demand end it "should not load versions after initial load" do expect(subject.dirty?(:versions)).to 
be_falsey expect(account).not_to receive(:client) subject.versions_on_demand expect(account).not_to receive(:client) subject.versions_on_demand end end end describe "#save" do before do allow(subject).to receive(:save_versions) allow(subject).to receive(:save_box) end it "should return self" do expect(subject.save).to eq(subject) end context "when box does not exist" do before { allow(subject).to receive(:exist?).and_return(false) } it "should save the box" do expect(subject).to receive(:save_box).ordered expect(subject).to receive(:save_versions).ordered subject.save end end context "when box includes unsaved versions" do before { subject.add_version("1.0.0") } it "should save the versions" do expect(subject).to receive(:save_versions) subject.save end end context "when box exists" do before { allow(subject).to receive(:exist?).and_return(true) } it "should not save anything" do expect(subject).not_to receive(:save_box) expect(subject).not_to receive(:save_versions) subject.save end context "when box includes unsaved versions" do before { subject.add_version("1.0.0") } it "should save the versions" do expect(subject).to receive(:save_versions) subject.save end end context "when box attribute is updated" do before { subject.description = "test" } it "should save the box" do expect(subject).to receive(:save_box) subject.save end end end end describe "#save_box" do context "when box exists" do before { allow(subject).to receive(:exist?).and_return(true) } it "should return self" do expect(account).to receive_message_chain(:client, :box_update).and_return({}) expect(subject.send(:save_box)).to eq(subject) end it "should request a box update" do expect(account).to receive_message_chain(:client, :box_update).and_return({}) subject.send(:save_box) end it "should include the organization name" do expect(account).to receive_message_chain(:client, :box_update). 
with(hash_including(username: organization_name)).and_return({}) subject.send(:save_box) end it "should include the name" do expect(account).to receive_message_chain(:client, :box_update). with(hash_including(name: name)).and_return({}) subject.send(:save_box) end it "should include the short description" do expect(account).to receive_message_chain(:client, :box_update). with(hash_including(short_description: subject.short_description)).and_return({}) subject.send(:save_box) end it "should include the description" do expect(account).to receive_message_chain(:client, :box_update). with(hash_including(description: subject.description)).and_return({}) subject.send(:save_box) end it "should include the box privacy" do expect(account).to receive_message_chain(:client, :box_update). with(hash_including(is_private: subject.private)).and_return({}) subject.send(:save_box) end end context "when box does not exist" do before { allow(subject).to receive(:exist?).and_return(false) } it "should request a box create" do expect(account).to receive_message_chain(:client, :box_create).and_return({}) subject.send(:save_box) end it "should include the organization name" do expect(account).to receive_message_chain(:client, :box_create). with(hash_including(username: organization_name)).and_return({}) subject.send(:save_box) end it "should include the name" do expect(account).to receive_message_chain(:client, :box_create). with(hash_including(name: name)).and_return({}) subject.send(:save_box) end it "should include the short description" do expect(account).to receive_message_chain(:client, :box_create). with(hash_including(short_description: subject.short_description)).and_return({}) subject.send(:save_box) end it "should include the description" do expect(account).to receive_message_chain(:client, :box_create). 
with(hash_including(description: subject.description)).and_return({}) subject.send(:save_box) end it "should include the box privacy" do expect(account).to receive_message_chain(:client, :box_create). with(hash_including(is_private: subject.private)).and_return({}) subject.send(:save_box) end end end describe "#save_versions" do it "should return self" do expect(subject.send(:save_versions)).to eq(subject) end it "should call save on any versions" do subject.add_version("1.0.0") expect(account).to receive_message_chain(:client, :box_version_create). and_return({}) subject.send(:save_versions) end end end vagrant_cloud-3.1.3/spec/unit/vagrant_cloud/client_spec.rb000066400000000000000000001514131477154370500237460ustar00rootroot00000000000000require 'spec_helper' require 'vagrant_cloud' describe VagrantCloud::Client do let(:connection) { double("connection", request: nil) } let(:oauth_client) { double("oauth", client_credentials: client_credentials) } let(:client_credentials) { double("client_credentials", get_token: token) } let(:token) { VagrantCloud::Auth::HCPToken.new(token: "stub", expires_at: Time.now.to_i + 100) } before do allow(OAuth2::Client).to receive(:new).and_return(oauth_client) end describe "#intialize" do context "with no arguments" do it "should have #url_base set" do expect(subject.url_base).not_to be_nil end it "should have #retry_count set" do expect(subject.retry_count).not_to be_nil end it "should have #retry_interval set" do expect(subject.retry_interval).not_to be_nil end it "should have #instrumentor set" do expect(subject.instrumentor).not_to be_nil end end context "with arguments" do it "should set #url_base" do subject = described_class.new(url_base: "http://example.com") expect(subject.url_base).to eq("http://example.com") end it "should set #retry_count" do subject = described_class.new(retry_count: 1) expect(subject.retry_count).to eq(1) end it "should set #retry_interval" do subject = described_class.new(retry_interval: 1) 
expect(subject.retry_interval).to eq(1) end it "should set #instrumentor" do i = double("instrumentor") subject = described_class.new(instrumentor: i) expect(subject.instrumentor).to eq(i) end it "should set #access_token" do subject = described_class.new(access_token: "token") expect(subject.access_token).to eq("token") end end end describe "#parse_json" do it "should return result with symbolized keys" do expect(subject.send(:parse_json, {"test" => :val}.to_json)).to eq({test: "val"}) end end describe "#clean_parameters" do it "should remove Data::Nil values from Array" do val = [1, 2, VagrantCloud::Data::Nil, 3] result = subject.send(:clean_parameters, val) expect(result).to eq([1, 2, 3]) end it "should remove Data::Nil values from nested Arrays" do val = [1, 2, VagrantCloud::Data::Nil, [1, 2, VagrantCloud::Data::Nil], 3] result = subject.send(:clean_parameters, val) expect(result).to eq([1, 2, [1, 2], 3]) end it "should remove Data::Nil values from Hash" do val = {a: 1, b: 2, c: VagrantCloud::Data::Nil, d: 3} result = subject.send(:clean_parameters, val) expect(result).to eq({a: 1, b: 2, d: 3}) end it "should remove Data::Nil values from nested Hashes" do val = {a: 1, b: 2, c: VagrantCloud::Data::Nil, d: {a: 1, b: VagrantCloud::Data::Nil}, e: 3} result = subject.send(:clean_parameters, val) expect(result).to eq({a: 1, b: 2, d: {a: 1}, e: 3}) end it "should remove Data::Nil values from nested Arrays and Hashes" do val = {a: 1, b: [1, 2, VagrantCloud::Data::Nil, {a: 1, b: VagrantCloud::Data::Nil}], c: 2} result = subject.send(:clean_parameters, val) expect(result).to eq({a: 1, b: [1, 2, {a: 1}], c: 2}) end end describe "#with_connection" do it "should provide the connection to the block" do subject.with_connection do |c| expect(c).to be_a(Excon::Connection) end end it "should gate access to the connection" do fiber = Fiber.new do subject.with_connection { Fiber.yield } end fiber.resume expect { subject.with_connection(wait: false) {} }. 
to raise_error(VagrantCloud::Error::ClientError::ConnectionLockedError) fiber.resume expect { subject.with_connection {} }.not_to raise_error end end describe "#request" do let(:response) { double("response", body: body, status: status) } let(:body) { "" } let(:status) { 200 } before do allow(subject).to receive(:with_connection). and_yield(connection) allow(connection).to receive(:request). and_return(response) end it "should require path to be set" do expect { subject.request }.to raise_error(ArgumentError) end it "should default to GET method" do expect(connection).to receive(:request). with(hash_including(method: :get)). and_return(response) subject.request(path: "/") end it "should use method provided" do expect(connection).to receive(:request). with(hash_including(method: :post)). and_return(response) subject.request(path: "/", method: :post) end it "should set a request ID header" do expect(connection).to receive(:request) do |args| expect(args.dig(:headers, "X-Request-Id")).not_to be_nil response end subject.request(path: "/") end context "path prefixing" do it "should prefix the v2 API by default" do expect(connection).to receive(:request) do |args| expect(args[:path]).to start_with(VagrantCloud::Client::API_V2_PATH) response end subject.request(path: "/") end it "should prefix the v1 API when requested" do expect(connection).to receive(:request) do |args| expect(args[:path]).to start_with(VagrantCloud::Client::API_V1_PATH) response end subject.request(path: "/", api_version: 1) end it "should prefix the v2 API when requested" do expect(connection).to receive(:request) do |args| expect(args[:path]).to start_with(VagrantCloud::Client::API_V2_PATH) response end subject.request(path: "/", api_version: 2) end it "should not add a prefix if the v1 API prefix already exists" do expect(connection).to receive(:request).with(hash_including(path: "/api/v1/test/path")) subject.request(path: "/api/v1/test/path") end it "should not add a prefix if the v2 API prefix 
already exists" do expect(connection).to receive(:request).with(hash_including(path: "/api/v2/test/path")) subject.request(path: "/api/v2/test/path") end context "when base path is defined" do let(:base_path) { "/custom/path" } subject { described_class.new(url_base: "http://example.com#{base_path}") } it "should suffix API to base path" do expect(connection).to receive(:request).with(hash_including(path: "#{base_path}/api/v1/test")) subject.request(path: "/test", api_version: 1) end it "should not modify path if base path is detected" do expect(connection).to receive(:request).with(hash_including(path: "#{base_path}/custom/request")) subject.request(path: "/custom/path/custom/request") end end end context "when response body is valid json" do let(:body) { {result: true}.to_json } it "should parse the return the JSON value" do expect(subject.request(path: "/")).to eq({result: true}) end end context "with parameters" do [:get, :head, :delete].each do |request_method| it "should use query parameters for #{request_method.to_s.upcase} request method" do expect(connection).to receive(:request).with(hash_including(query: anything)). and_return(response) subject.request(path: "/", method: request_method, params: {testing: true}) end end it "should use JSON body parameters for other request methods" do expect(connection).to receive(:request).with(hash_including(body: anything)). and_return(response) subject.request(path: "/", method: :post, params: {testing: true}) end it "should pass parameter hash through in request" do expect(connection).to receive(:request).with(hash_including(query: {testing: true})). and_return(response) subject.request(path: "/", params: {testing: true}) end it "should remove parameters that were not explicitly set" do expect(connection).to receive(:request).with(hash_including(query: {testing: true})). 
and_return(response) subject.request(path: "/", params: {testing: true, invalid: VagrantCloud::Data::Nil}) end end context "idempotent information" do [:get, :head].each do |request_method| it "should set idempotent options for #{request_method.to_s.upcase} request method" do expect(connection).to receive(:request). with(hash_including(idempotent: anything, retry_limit: anything, retry_interval: anything)). and_return(response) subject.request(path: "/", method: request_method) end end it "should not set idempotent options for other request methods" do expect(connection).to receive(:request) do |args| expect(args.keys).not_to include(:idempotent) expect(args.keys).not_to include(:retry_limit) expect(args.keys).not_to include(:retry_interval) response end subject.request(path: "/", method: :post) end end context "with errors" do context "with request errors" do let(:response) { double("response", status: 403, body: '{"errors": ["forbidden request"]}') } before { expect(connection).to receive(:request).and_raise(Excon::Error::Forbidden.new("forbidden", nil, response)) } it "should raise a wrapped error" do expect { subject.request(path: "/") }.to raise_error(VagrantCloud::Error::ClientError::RequestError) end it "should set the error message from the content" do err = nil subject.request(path: "/") rescue => err expect(err.error_arr).to eq(["forbidden request"]) end it "should set the error status code" do err = nil subject.request(path: "/") rescue => err expect(err.error_code).to eq(403) end end end end describe "#clone" do it "should create a new clone" do expect(subject.clone).to be_a(described_class) end it "should be a new instance" do expect(subject.clone).not_to be(subject) end it "should clone custom settings" do subject = described_class.new(url_base: "http://example.com") expect(subject.clone.url_base).to eq("http://example.com") end it "should override the access_token when provided" do subject = described_class.new(access_token: "token") 
expect(subject.clone(access_token: "new-token").access_token).to eq("new-token") end end describe "#authentication_token_create" do let(:username) { double("username") } let(:password) { double("password") } let(:description) { double("description") } let(:code) { double("code") } it "should require a username" do expect { subject.authentication_token_create(password: password) }. to raise_error(ArgumentError) end it "should require a password" do expect { subject.authentication_token_create(username: username) }. to raise_error(ArgumentError) end it "should send remote request and include username and password" do expect(subject).to receive(:request) do |args| expect(args[:path]).to include("authenticate") expect(args[:method]).to eq(:post) expect(args.dig(:params, :user, :login)).to eq(username) expect(args.dig(:params, :user, :password)).to eq(password) end subject.authentication_token_create(username: username, password: password) end it "should include description and code if provided" do expect(subject).to receive(:request) do |args| expect(args.dig(:params, :token, :description)).to eq(description) expect(args.dig(:params, :two_factor, :code)).to eq(code) end subject.authentication_token_create(username: username, password: password, description: description, code: code) end it "should use v1 API" do expect(subject).to receive(:request) do |args| expect(args[:api_version]).to eq(1) end subject.authentication_token_create(username: username, password: password) end end describe "#authentication_token_delete" do it "should send delete request" do expect(subject).to receive(:request).with(hash_including(method: :delete, path: "authenticate")) subject.authentication_token_delete end it "should use v1 API" do expect(subject).to receive(:request) do |args| expect(args[:api_version]).to eq(1) end subject.authentication_token_delete end end describe "#authentication_request_2fa_code" do let(:username) { double("username") } let(:password) { double("password") } 
let(:delivery_method) { method("delivery_method") } let(:args) { {username: username, password: password, delivery_method: delivery_method} } it "should require a username" do args.delete(:username) expect { subject.authentication_request_2fa_code(**args) }. to raise_error(ArgumentError) end it "should require a password" do args.delete(:password) expect { subject.authentication_request_2fa_code(**args) }. to raise_error(ArgumentError) end it "should require a delivery method" do args.delete(:delivery_method) expect { subject.authentication_request_2fa_code(**args) }. to raise_error(ArgumentError) end it "should include username, password, and delivery method in request" do expect(subject).to receive(:request) do |args| expect(args.dig(:params, :two_factor, :delivery_method)).to eq(delivery_method) expect(args.dig(:params, :user, :login)).to eq(username) expect(args.dig(:params, :user, :password)).to eq(password) end subject.authentication_request_2fa_code(**args) end it "should post to the two factor request path" do expect(subject).to receive(:request).with(hash_including(method: :post, path: "two-factor/request-code")) subject.authentication_request_2fa_code(**args) end it "should use v1 API" do expect(subject).to receive(:request) do |args| expect(args[:api_version]).to eq(1) end subject.authentication_request_2fa_code(**args) end end describe "#search" do let(:query) { double("query") } let(:provider) { double("provider") } let(:sort) { double("sort") } let(:order) { "asc" } let(:limit) { 53 } let(:page) { 101 } let(:args) { {query: query, provider: provider, sort: sort, order: order, limit: limit, page: page} } it "should sent request for search" do expect(subject).to receive(:request).with(hash_including(method: :get, path: "search")) subject.search(**args) end it "should include given values within request parameters" do expect(subject).to receive(:request) do |args| expect(args.dig(:params, :q)).to eq(query) expect(args.dig(:params, :provider)).to 
eq(provider) expect(args.dig(:params, :sort)).to eq(sort) expect(args.dig(:params, :order)).to eq(order) expect(args.dig(:params, :limit)).to eq(limit) expect(args.dig(:params, :page)).to eq(page) end subject.search(**args) end end describe "#box_get" do before { allow(subject).to receive(:request) } it "should require username is provided" do expect { subject.box_get(name: "mybox") }.to raise_error(ArgumentError) end it "should require name is provided" do expect { subject.box_get(username: "myname") }.to raise_error(ArgumentError) end it "should send the remote request with username and name" do expect(subject).to receive(:request) do |args| expect(args[:path]).to include("myname") expect(args[:path]).to include("mybox") end subject.box_get(username: "myname", name: "mybox") end end describe "#box_create" do let(:name) { double("name") } let(:username) { double("username") } let(:description) { double("description") } let(:short_description) { double("short_description") } let(:is_private) { double("is_private") } before { allow(subject).to receive(:request) } it "should require username is provided" do expect { subject.box_create(name: name) }.to raise_error(ArgumentError) end it "should require name is provided" do expect { subject.box_create(username: username) }.to raise_error(ArgumentError) end it "should only require username and name" do expect(subject).to receive(:request) subject.box_create(username: username, name: name) end it "should include description" do expect(subject).to receive(:request) do |args| expect(args.dig(:params, :box, :description)).to eq(description) end subject.box_create(username: username, name: name, description: description) end it "should include short_description" do expect(subject).to receive(:request) do |args| expect(args.dig(:params, :box, :short_description)).to eq(short_description) end subject.box_create(username: username, name: name, short_description: short_description) end it "should include is_private" do 
expect(subject).to receive(:request) do |args| expect(args.dig(:params, :box, :is_private)).to eq(is_private) end subject.box_create(username: username, name: name, is_private: is_private) end end describe "#box_update" do let(:username) { double("username") } let(:name) { double("name") } let(:short_description) { double("short_description") } let(:description) { double("description") } let(:is_private) { double("is_private") } before { allow(subject).to receive(:request) } it "should require username is provided" do expect { subject.box_update(name: name) }.to raise_error(ArgumentError) end it "should require name is provided" do expect { subject.box_update(username: username) }.to raise_error(ArgumentError) end it "should only require username and name" do expect(subject).to receive(:request) subject.box_update(username: username, name: name) end it "should include description" do expect(subject).to receive(:request) do |args| expect(args.dig(:params, :box, :description)).to eq(description) end subject.box_update(username: username, name: name, description: description) end it "should include short_description" do expect(subject).to receive(:request) do |args| expect(args.dig(:params, :box, :short_description)).to eq(short_description) end subject.box_update(username: username, name: name, short_description: short_description) end it "should include is_private" do expect(subject).to receive(:request) do |args| expect(args.dig(:params, :box, :is_private)).to eq(is_private) end subject.box_update(username: username, name: name, is_private: is_private) end end describe "#box_delete" do let(:username) { double("username") } let(:name) { double("name") } before { allow(subject).to receive(:request) } it "should require username is provided" do expect { subject.box_delete(name: name) }.to raise_error(ArgumentError) end it "should require name is provided" do expect { subject.box_delete(username: username) }.to raise_error(ArgumentError) end it "should send deletion 
request" do expect(subject).to receive(:request) subject.box_delete(username: username, name: name) end end describe "#box_version_get" do let(:username) { double("username") } let(:name) { double("name") } let(:version) { double("version") } before { allow(subject).to receive(:request) } it "should require username is provided" do expect { subject.box_version_get(name: name, version: version) }. to raise_error(ArgumentError) end it "should require name is provided" do expect { subject.box_version_get(username: username, version: version) }. to raise_error(ArgumentError) end it "should require version is provided" do expect { subject.box_version_get(username: username, name: name) }. to raise_error(ArgumentError) end it "should request the box version" do expect(subject).to receive(:request) subject.box_version_get(username: username, name: name, version: version) end end describe "#box_version_create" do let(:username) { double("username") } let(:name) { double("name") } let(:version) { double("version") } let(:description) { double("description") } before { allow(subject).to receive(:request) } it "should require username is provided" do expect { subject.box_version_create(name: name, version: version) }. to raise_error(ArgumentError) end it "should require name is provided" do expect { subject.box_version_create(username: username, version: version) }. to raise_error(ArgumentError) end it "should require version is provided" do expect { subject.box_version_create(username: username, name: name) }. 
to raise_error(ArgumentError) end it "should request the version creation" do expect(subject).to receive(:request) subject.box_version_create(username: username, name: name, version: version) end it "should make request using POST method" do expect(subject).to receive(:request).with(hash_including(method: :post)) subject.box_version_create(username: username, name: name, version: version) end it "should include description if provided" do expect(subject).to receive(:request) do |args| expect(args.dig(:params, :version, :description)).to eq(description) end subject.box_version_create(username: username, name: name, version: version, description: description) end it "should include the version in the parameters" do expect(subject).to receive(:request) do |args| expect(args.dig(:params, :version, :version)).to eq(version) end subject.box_version_create(username: username, name: name, version: version) end end describe "#box_version_update" do let(:username) { double("username") } let(:name) { double("name") } let(:version) { double("version") } let(:description) { double("description") } before { allow(subject).to receive(:request) } it "should require username is provided" do expect { subject.box_version_update(name: name, version: version) }. to raise_error(ArgumentError) end it "should require name is provided" do expect { subject.box_version_update(username: username, version: version) }. to raise_error(ArgumentError) end it "should require version is provided" do expect { subject.box_version_update(username: username, name: name) }. 
to raise_error(ArgumentError) end it "should request the version update" do expect(subject).to receive(:request) subject.box_version_update(username: username, name: name, version: version) end it "should make request using PUT method" do expect(subject).to receive(:request).with(hash_including(method: :put)) subject.box_version_update(username: username, name: name, version: version) end it "should include description if provided" do expect(subject).to receive(:request) do |args| expect(args.dig(:params, :version, :description)).to eq(description) end subject.box_version_update(username: username, name: name, version: version, description: description) end it "should include the version in the parameters" do expect(subject).to receive(:request) do |args| expect(args.dig(:params, :version, :version)).to eq(version) end subject.box_version_update(username: username, name: name, version: version) end end describe "#box_version_delete" do let(:username) { double("username") } let(:name) { double("name") } let(:version) { double("version") } before { allow(subject).to receive(:request) } it "should require username is provided" do expect { subject.box_version_delete(name: name, version: version) }. to raise_error(ArgumentError) end it "should require name is provided" do expect { subject.box_version_delete(username: username, version: version) }. to raise_error(ArgumentError) end it "should require version is provided" do expect { subject.box_version_delete(username: username, name: name) }. 
to raise_error(ArgumentError) end it "should request the version delete" do expect(subject).to receive(:request) subject.box_version_delete(username: username, name: name, version: version) end it "should make request using DELETE method" do expect(subject).to receive(:request).with(hash_including(method: :delete)) subject.box_version_delete(username: username, name: name, version: version) end end describe "#box_version_release" do let(:username) { double("username") } let(:name) { double("name") } let(:version) { double("version") } before { allow(subject).to receive(:request) } it "should require username is provided" do expect { subject.box_version_release(name: name, version: version) }. to raise_error(ArgumentError) end it "should require name is provided" do expect { subject.box_version_release(username: username, version: version) }. to raise_error(ArgumentError) end it "should require version is provided" do expect { subject.box_version_release(username: username, name: name) }. to raise_error(ArgumentError) end it "should request the version release" do expect(subject).to receive(:request) do |args| expect(args[:path]).to include("release") end subject.box_version_release(username: username, name: name, version: version) end it "should make request using PUT method" do expect(subject).to receive(:request).with(hash_including(method: :put)) subject.box_version_release(username: username, name: name, version: version) end end describe "#box_version_revoke" do let(:username) { double("username") } let(:name) { double("name") } let(:version) { double("version") } before { allow(subject).to receive(:request) } it "should require username is provided" do expect { subject.box_version_revoke(name: name, version: version) }. to raise_error(ArgumentError) end it "should require name is provided" do expect { subject.box_version_revoke(username: username, version: version) }. 
to raise_error(ArgumentError) end it "should require version is provided" do expect { subject.box_version_revoke(username: username, name: name) }. to raise_error(ArgumentError) end it "should request the version revoke" do expect(subject).to receive(:request) do |args| expect(args[:path]).to include("revoke") end subject.box_version_revoke(username: username, name: name, version: version) end it "should make request using PUT method" do expect(subject).to receive(:request).with(hash_including(method: :put)) subject.box_version_revoke(username: username, name: name, version: version) end end describe "#box_version_provider_get" do let(:username) { double("username") } let(:name) { double("name") } let(:version) { double("version") } let(:provider) { double("provider") } let(:architecture) { "TEST_ARCHITECTURE" } before { allow(subject).to receive(:request) } it "should require username is provided" do expect { subject.box_version_provider_get(name: name, version: version, provider: provider) }. to raise_error(ArgumentError) end it "should require name is provided" do expect { subject.box_version_provider_get(username: username, version: version, provider: provider) }. to raise_error(ArgumentError) end it "should require version is provided" do expect { subject.box_version_provider_get(username: username, name: name, provider: provider) }. to raise_error(ArgumentError) end it "should require provider is provided" do expect { subject.box_version_provider_get(username: username, name: name, version: version) }. 
to raise_error(ArgumentError) end it "should include architecture when provided" do expect(subject).to receive(:request) do |args| expect(args[:path]).to include(architecture) end subject.box_version_provider_get( username: username, name: name, version: version, provider: provider, architecture: architecture ) end it "should request the box version provider" do expect(subject).to receive(:request) subject.box_version_provider_get( username: username, name: name, version: version, provider: provider ) end end describe "#box_version_provider_create" do let(:username) { double("username") } let(:name) { double("name") } let(:version) { double("version") } let(:provider) { double("provider") } let(:url) { double("url") } let(:checksum) { double("checksum") } let(:checksum_type) { double("checksum_type") } let(:architecture) { double("architecture") } let(:default_architecture) { double("default_architecture") } before { allow(subject).to receive(:request) } it "should require username is provided" do expect { subject.box_version_provider_create( name: name, version: version, provider: provider) }.to raise_error(ArgumentError) end it "should require name is provided" do expect { subject.box_version_provider_create( username: username, version: version, provider: provider) }.to raise_error(ArgumentError) end it "should require version is provided" do expect { subject.box_version_provider_create( username: username, name: name, provider: provider) }.to raise_error(ArgumentError) end it "should require provider is provided" do expect { subject.box_version_provider_create( username: username, name: name, version: version ) }.to raise_error(ArgumentError) end it "should create the box version provider" do expect(subject).to receive(:request) subject.box_version_provider_create( username: username, name: name, version: version, provider: provider ) end it "should create the box version provider with POST method" do expect(subject).to 
receive(:request).with(hash_including(method: :post)) subject.box_version_provider_create( username: username, name: name, version: version, provider: provider ) end it "should include url if provided" do expect(subject).to receive(:request) do |args| expect(args.dig(:params, :provider, :url)).to eq(url) end subject.box_version_provider_create( username: username, name: name, version: version, provider: provider, url: url ) end it "should include checksum if provided" do expect(subject).to receive(:request) do |args| expect(args.dig(:params, :provider, :checksum)).to eq(checksum) end subject.box_version_provider_create( username: username, name: name, version: version, provider: provider, checksum: checksum ) end it "should include checksum_type if provided" do expect(subject).to receive(:request) do |args| expect(args.dig(:params, :provider, :checksum_type)).to eq(checksum_type) end subject.box_version_provider_create( username: username, name: name, version: version, provider: provider, checksum_type: checksum_type ) end context "architecture" do context "when not included" do after do subject.box_version_provider_create( username: username, name: name, version: version, provider: provider, checksum_type: checksum_type ) end it "should not be in params" do expect(subject).to receive(:request) do |args| expect(args.dig(:params, :provider).key?(:architecture)).to be_falsey end end it "should call the v1 API" do expect(subject).to receive(:request) do |args| expect(args[:api_version]).to eq(1) end end end context "when included" do after do subject.box_version_provider_create( username: username, name: name, version: version, provider: provider, checksum_type: checksum_type, architecture: architecture ) end it "should be in params" do expect(subject).to receive(:request) do |args| expect(args.dig(:params, :provider, :architecture)).to eq(architecture) end end it "should call the v2 API" do expect(subject).to receive(:request) do |args| expect(args[:api_version]).to 
eq(2) end end end end context "default architecture" do it "should be default nil value when not provided" do expect(subject).to receive(:request) do |args| expect(args.dig(:params, :provider, :default_architecture)).to eq(VagrantCloud::Data::Nil) end subject.box_version_provider_create( username: username, name: name, version: version, provider: provider, architecture: architecture, checksum_type: checksum_type ) end context "when value is true" do it "should include default architecture as true" do expect(subject).to receive(:request) do |args| expect(args.dig(:params, :provider, :default_architecture)).to be_truthy end subject.box_version_provider_create( username: username, name: name, version: version, provider: provider, checksum_type: checksum_type, architecture: architecture, default_architecture: true ) end end context "when value is false" do it "should include default architecture as false" do expect(subject).to receive(:request) do |args| expect(args.dig(:params, :provider, :default_architecture)).to be(false) end subject.box_version_provider_create( username: username, name: name, version: version, provider: provider, checksum_type: checksum_type, architecture: architecture, default_architecture: false ) end end context "when architecture is not provided" do it "should not be included" do expect(subject).to receive(:request) do |args| expect(args.dig(:params, :provider).key?(:default_architecture)).to be_falsey end subject.box_version_provider_create( username: username, name: name, version: version, provider: provider, checksum_type: checksum_type, default_architecture: false ) end end end end describe "#box_version_provider_update" do let(:username) { double("username") } let(:name) { double("name") } let(:version) { double("version") } let(:provider) { double("provider") } let(:url) { double("url") } let(:checksum) { double("checksum") } let(:checksum_type) { double("checksum_type") } let(:architecture) { "TEST_ARCHITECTURE" } let(:new_architecture) 
{ double("new_architecture") } before { allow(subject).to receive(:request) } it "should require username is provided" do expect { subject.box_version_provider_update( name: name, version: version, provider: provider ) }.to raise_error(ArgumentError) end it "should require name is provided" do expect { subject.box_version_provider_update( username: username, version: version, provider: provider) }.to raise_error(ArgumentError) end it "should require version is provided" do expect { subject.box_version_provider_update( username: username, name: name, provider: provider ) }.to raise_error(ArgumentError) end it "should require provider is provided" do expect { subject.box_version_provider_update( username: username, name: name, version: version ) }.to raise_error(ArgumentError) end it "should update the box version provider" do expect(subject).to receive(:request) subject.box_version_provider_update( username: username, name: name, version: version, provider: provider ) end it "should update the box version provider with PUT method" do expect(subject).to receive(:request).with(hash_including(method: :put)) subject.box_version_provider_update( username: username, name: name, version: version, provider: provider ) end it "should include url if provided" do expect(subject).to receive(:request) do |args| expect(args.dig(:params, :provider, :url)).to eq(url) end subject.box_version_provider_update( username: username, name: name, version: version, provider: provider, url: url ) end it "should include checksum if provided" do expect(subject).to receive(:request) do |args| expect(args.dig(:params, :provider, :checksum)).to eq(checksum) end subject.box_version_provider_update( username: username, name: name, version: version, provider: provider, checksum: checksum ) end it "should include checksum_type if provided" do expect(subject).to receive(:request) do |args| expect(args.dig(:params, :provider, :checksum_type)).to eq(checksum_type) end 
subject.box_version_provider_update( username: username, name: name, version: version, provider: provider, checksum_type: checksum_type ) end context "architecture" do context "when provided" do after do subject.box_version_provider_update( username: username, name: name, version: version, provider: provider, architecture: architecture, checksum_type: checksum_type ) end it "should be included in the path" do expect(subject).to receive(:request) do |args| expect(args[:path]).to include(architecture) end end it "should use v2 API" do expect(subject).to receive(:request) do |args| expect(args[:api_version]).to eq(2) end end end context "when not provided" do after do subject.box_version_provider_update( username: username, name: name, version: version, provider: provider, checksum_type: checksum_type ) end it "should use v1 API" do expect(subject).to receive(:request) do |args| expect(args[:api_version]).to eq(1) end end end end context "new architecture" do context "when architecture is provided" do it "should be default nil value when not provided" do expect(subject).to receive(:request) do |args| expect(args.dig(:params, :provider, :architecture)).to eq(VagrantCloud::Data::Nil) end subject.box_version_provider_update( username: username, name: name, version: version, provider: provider, checksum_type: checksum_type, architecture: architecture ) end it "should be included when provided" do expect(subject).to receive(:request) do |args| expect(args.dig(:params, :provider, :architecture)).to eq(new_architecture) end subject.box_version_provider_update( username: username, name: name, version: version, provider: provider, checksum_type: checksum_type, architecture: architecture, new_architecture: new_architecture ) end end context "when architecture is not provided" do it "should not be included in params" do expect(subject).to receive(:request) do |args| expect(args.dig(:params, :provider).key?(:new_architecture)).to be(false) end subject.box_version_provider_update( 
username: username, name: name, version: version, provider: provider, checksum_type: checksum_type, new_architecture: new_architecture ) end end end context "default architecture" do context "when architecture is provided" do it "should be default nil value when not provided" do expect(subject).to receive(:request) do |args| expect(args.dig(:params, :provider, :default_architecture)).to eq(VagrantCloud::Data::Nil) end subject.box_version_provider_update( username: username, name: name, version: version, provider: provider, checksum_type: checksum_type, architecture: architecture, ) end context "when value is true" do it "should include default architecture as true" do expect(subject).to receive(:request) do |args| expect(args.dig(:params, :provider, :default_architecture)).to be_truthy end subject.box_version_provider_update( username: username, name: name, version: version, provider: provider, checksum_type: checksum_type, architecture: architecture, default_architecture: true ) end end context "when value is false" do it "should include default architecture as false" do expect(subject).to receive(:request) do |args| expect(args.dig(:params, :provider, :default_architecture)).to be(false) end subject.box_version_provider_update( username: username, name: name, version: version, provider: provider, checksum_type: checksum_type, architecture: architecture, default_architecture: false ) end end end context "when architecture is not provided" do it "should not be included in params" do expect(subject).to receive(:request) do |args| expect(args.dig(:params, :provider).key?(:default_architecture)).to be(false) end subject.box_version_provider_update( username: username, name: name, version: version, provider: provider, checksum_type: checksum_type, default_architecture: true ) end end end end describe "#box_version_provider_delete" do let(:username) { double("username") } let(:name) { double("name") } let(:version) { double("version") } let(:provider) { 
double("provider") } let(:architecture) { "TEST_ARCHITECTURE" } before { allow(subject).to receive(:request) } it "should require username is provided" do expect { subject.box_version_provider_delete( name: name, version: version, provider: provider) }.to raise_error(ArgumentError) end it "should require name is provided" do expect { subject.box_version_provider_delete( username: username, version: version, provider: provider) }.to raise_error(ArgumentError) end it "should require version is provided" do expect { subject.box_version_provider_delete( username: username, name: name, provider: provider ) }.to raise_error(ArgumentError) end it "should require provider is provided" do expect { subject.box_version_provider_delete( username: username, name: name, version: version) }.to raise_error(ArgumentError) end it "should delete the box version provider" do expect(subject).to receive(:request) subject.box_version_provider_delete( username: username, name: name, version: version, provider: provider ) end it "should delete the box version provider with DELETE method" do expect(subject).to receive(:request).with(hash_including(method: :delete)) subject.box_version_provider_delete( username: username, name: name, version: version, provider: provider ) end context "with architecture" do after do subject.box_version_provider_delete( username: username, name: name, version: version, provider: provider, architecture: architecture ) end it "should include architecture when provided" do expect(subject).to receive(:request) do |args| expect(args[:path]).to include(architecture) end end it "should use v2 API" do expect(subject).to receive(:request) do |args| expect(args[:api_version]).to eq(2) end end end context "without architecture" do it "should use v1 API" do expect(subject).to receive(:request) do |args| expect(args[:api_version]).to eq(1) end subject.box_version_provider_delete( username: username, name: name, version: version, provider: provider, ) end end end describe 
"#box_version_provider_upload" do let(:username) { double("username") } let(:name) { double("name") } let(:version) { double("version") } let(:provider) { double("provider") } let(:architecture) { "TEST_ARCHITECTURE" } before { allow(subject).to receive(:request) } it "should require username is provided" do expect { subject.box_version_provider_upload( name: name, version: version, provider: provider ) }.to raise_error(ArgumentError) end it "should require name is provided" do expect { subject.box_version_provider_upload( username: username, version: version, provider: provider ) }.to raise_error(ArgumentError) end it "should require version is provided" do expect { subject.box_version_provider_upload( username: username, name: name, provider: provider ) }.to raise_error(ArgumentError) end it "should require provider is provided" do expect { subject.box_version_provider_upload( username: username, name: name, version: version ) }.to raise_error(ArgumentError) end it "should send upload request for the box version provider" do expect(subject).to receive(:request) subject.box_version_provider_upload( username: username, name: name, version: version, provider: provider ) end it "should request the box version provider upload with GET method" do expect(subject).to receive(:request).with(hash_including(method: :get)) subject.box_version_provider_upload( username: username, name: name, version: version, provider: provider ) end context "with architecture" do after do subject.box_version_provider_upload( username: username, name: name, version: version, provider: provider, architecture: architecture ) end it "should include architecture when provided" do expect(subject).to receive(:request) do |args| expect(args[:path]).to include(architecture) end end it "should use v2 API" do expect(subject).to receive(:request) do |args| expect(args[:api_version]).to eq(2) end end end context "without architecture" do it "should use v1 API" do expect(subject).to receive(:request) do 
|args| expect(args[:api_version]).to eq(1) end subject.box_version_provider_upload( username: username, name: name, version: version, provider: provider ) end end it "should request the upload path" do expect(subject).to receive(:request) do |args| expect(args[:path]).to include("upload") end subject.box_version_provider_upload( username: username, name: name, version: version, provider: provider ) end end describe "#box_version_provider_upload_direct" do let(:username) { double("username") } let(:name) { double("name") } let(:version) { double("version") } let(:provider) { double("provider") } let(:architecture) { "TEST_ARCHITECTURE" } before { allow(subject).to receive(:request) } it "should require username is provided" do expect { subject.box_version_provider_upload_direct( name: name, version: version, provider: provider ) }.to raise_error(ArgumentError) end it "should require name is provided" do expect { subject.box_version_provider_upload_direct( username: username, version: version, provider: provider ) }.to raise_error(ArgumentError) end it "should require version is provided" do expect { subject.box_version_provider_upload_direct( username: username, name: name, provider: provider ) }.to raise_error(ArgumentError) end it "should require provider is provided" do expect { subject.box_version_provider_upload_direct( username: username, name: name, version: version ) }.to raise_error(ArgumentError) end it "should send upload request for the box version provider" do expect(subject).to receive(:request) subject.box_version_provider_upload_direct( username: username, name: name, version: version, provider: provider ) end it "should request the box version provider upload with GET method" do expect(subject).to receive(:request).with(hash_including(method: :get)) subject.box_version_provider_upload_direct( username: username, name: name, version: version, provider: provider ) end it "should request the upload path" do expect(subject).to receive(:request) do 
|args| expect(args[:path]).to include("upload") end subject.box_version_provider_upload_direct( username: username, name: name, version: version, provider: provider ) end context "with architecture" do after do subject.box_version_provider_upload_direct( username: username, name: name, version: version, provider: provider, architecture: architecture ) it "should be included in path" do expect(subject).to receive(:request) do |args| expect(args[:path]).to include(architecture) end end it "should use v2 API" do expect(subject).to receive(:request) do |args| expect(args[:api_version]).to eq(2) end end end end context "without architecture" do it "should use v1 API" do expect(subject).to receive(:request) do |args| expect(args[:api_version]).to eq(1) end subject.box_version_provider_upload_direct( username: username, name: name, version: version, provider: provider, ) end end end end vagrant_cloud-3.1.3/spec/unit/vagrant_cloud/data_spec.rb000066400000000000000000000255451477154370500234070ustar00rootroot00000000000000require 'spec_helper' require 'vagrant_cloud' describe VagrantCloud::Data::NilClass do let(:subject) { described_class.instance } it "should be a singleton" do expect(described_class.ancestors).to include(Singleton) end it "should be nil?" do expect(subject.nil?).to be_truthy end it "should == nil" do expect(subject == nil).to be_truthy end it "should === nil" do expect(subject === nil).to be_truthy end it "should equal? 
nil" do expect(subject.equal?(nil)).to be_truthy end it "should convert to 0" do expect(subject.to_i).to eq(0) end it "should convert to 0.0" do expect(subject.to_f).to eq(0.0) end it "should convert to empty array" do expect(subject.to_a).to eq([]) end it "should convert to empty hash" do expect(subject.to_h).to eq({}) end it "should convert to empty string" do expect(subject.to_s).to eq("") end it "should & to false" do expect(subject & :value).to be_falsey end it "should | to false" do expect(subject | :value).to be_falsey end it "should ^ to false" do expect(subject ^ :value).to be_falsey end it "should inspect to nil string" do expect(subject.inspect).to eq("nil") end end describe VagrantCloud::Data do describe "#initialize" do it "should accept no arguments when creating" do expect { described_class.new }.not_to raise_error end it "should accept arguments when creating" do expect { described_class.new(value: true) }.not_to raise_error end end describe "#[]" do it "should provide access to arguments" do instance = described_class.new(value: 1) expect(instance[:value]).to eq(1) end it "should return custom nil when argument is not defined" do instance = described_class.new(value: 1) expect(instance[:other_value]).to eq(VagrantCloud::Data::Nil) end end end describe VagrantCloud::Data::Immutable do context "with required attributes" do let(:described_class) do @c ||= Class.new(VagrantCloud::Data::Immutable) do attr_required :password end end it "should error if required argument is not provided" do expect { described_class.new }.to raise_error(ArgumentError) end it "should not error if required argument is provided" do expect { described_class.new(password: "value") }.not_to raise_error end it "should add an accessor to retrieve the value" do instance = described_class.new(password: "value") expect(instance.password).to eq("value") end it "should retrieve value via [] using a symbol" do instance = described_class.new(password: "value") 
expect(instance[:password]).to eq("value") end it "should retrieve value via [] using a string" do instance = described_class.new(password: "value") expect(instance["password"]).to eq("value") end it "should not allow value to be modified" do instance = described_class.new(password: "value") expect { instance.password.replace("new-value") }.to raise_error(FrozenError) end end context "with optional attributes" do let(:described_class) do @c ||= Class.new(VagrantCloud::Data::Immutable) do attr_optional :username end end it "should not error if argument is not provided" do expect { described_class.new(username: "value") }.not_to raise_error end it "should add an accessor to retrieve the value" do instance = described_class.new(username: "value") expect(instance.username).to eq("value") end it "should retrieve value via [] using a symbol" do instance = described_class.new(username: "value") expect(instance[:username]).to eq("value") end it "should retrieve value via [] using a string" do instance = described_class.new(username: "value") expect(instance["username"]).to eq("value") end it "should not allow value to be modified" do instance = described_class.new(username: "value") expect { instance.username.replace("new-value") }.to raise_error(FrozenError) end it "should return custom nil via accessor when unset" do instance = described_class.new expect(instance.username).to eq(VagrantCloud::Data::Nil) end it "should return custom nil via []" do instance = described_class.new expect(instance[:username]).to eq(VagrantCloud::Data::Nil) end end context "with optional and required attributes" do let(:described_class) do @c ||= Class.new(VagrantCloud::Data::Immutable) do attr_required :password attr_optional :username end end it "should error if invalid argument is provided" do expect { described_class.new(password: "pass", other: true) }.to raise_error(ArgumentError) end it "should error if no arguments are provided" do expect { described_class.new }.to 
raise_error(ArgumentError) end it "should error if only optional argument is provided" do expect { described_class.new(username: "user") }.to raise_error(ArgumentError) end it "should not error if required argument is provided" do expect { described_class.new(password: "pass") }.not_to raise_error end it "should not error if both required and optional arguments are provided" do expect { described_class.new(password: "pass", username: "user") }.not_to raise_error end end end describe VagrantCloud::Data::Mutable do let(:username) { "U" } let(:password) { "P" } context ".load" do let(:described_class) do @c ||= Class.new(VagrantCloud::Data::Mutable) do attr_reader :key attr_required :password attr_optional :username def initialize(key:, **opts) super(**opts) @key = key end end end it "should create a new instance" do data = {password: "pass", key: "key"} instance = described_class.load(data) expect(instance).to be_a(described_class) end it "should set the information provided in the data hash" do data = {password: "pass", key: "key"} instance = described_class.load(data) expect(instance.password).to eq("pass") expect(instance.key).to eq("key") end it "should ignore extra information in the hash" do data = {password: "pass", key: "key", invalid: true} instance = described_class.load(data) expect(instance.password).to eq("pass") expect(instance.key).to eq("key") end end describe "#freeze" do it "should return self" do expect(subject.freeze).to eq(subject) end it "should not freeze" do expect(subject.freeze).not_to be_frozen end end context "with no mutables defined" do let(:described_class) do @c ||= Class.new(VagrantCloud::Data::Mutable) do attr_required :password attr_optional :username end end let(:subject) { described_class.new(username: username, password: password) } it "should not allow setting optional value" do expect { subject.username = "new-value" }.to raise_error(NoMethodError) end it "should not allow setting required value" do expect { subject.password = 
"new-value" }.to raise_error(NoMethodError) end end context "with mutables defined" do let(:described_class) do @c ||= Class.new(VagrantCloud::Data::Mutable) do attr_required :password attr_optional :username attr_mutable :username, :password end end let(:subject) { described_class.new(username: username, password: password) } it "should allow setting optional value" do expect { subject.username = "new-value" }.not_to raise_error end it "should allow setting required value" do expect { subject.password = "new-value" }.not_to raise_error end it "should return updated optional value" do expect(subject.username).to eq(username) subject.username = "new-value" expect(subject.username).to eq("new-value") end it "should return updated required value" do expect(subject.password).to eq(password) subject.password = "new-value" expect(subject.password).to eq("new-value") end context "#dirty?" do it "should mark the instance as dirty when value is updated" do subject.username = "new-value" expect(subject.dirty?).to be_truthy end it "should mark the field as dirty with value is updated" do subject.username = "new-value" expect(subject.dirty?(:username)).to be_truthy end it "should not mark other fields dirty when not updated" do subject.username = "new-value" expect(subject.dirty?(:username)).to be_truthy expect(subject.dirty?(:password)).to be_falsey end end context "#clean!" do it "should make a dirty instance clean" do subject.username = "new-value" expect(subject.dirty?).to be_truthy subject.clean! expect(subject.dirty?).to be_falsey end it "should make clean values non-modifyable" do subject.username = "new-value" subject.clean! 
expect { subject.username.replace("testing") }.to raise_error(FrozenError) end end context "#clean" do it "should update values with provided data" do expect(subject.username).to eq(username) expect(subject.password).to eq(password) subject.clean(data: {username: "new-user", password: "new-pass"}) expect(subject.username).to eq("new-user") expect(subject.password).to eq("new-pass") end it "should update instance so it is non-dirty" do expect(subject.dirty?).to be_falsey subject.clean(data: {username: "new-user", password: "new-pass"}) expect(subject.dirty?).to be_falsey end it "should ignore fields of data provided" do expect(subject.username).to eq(username) expect(subject.password).to eq(password) subject.clean(data: {username: "new-user", password: "new-pass"}, ignores: :password) expect(subject.username).to eq("new-user") expect(subject.password).to eq(password) end it "should only update requested fields of data provided" do expect(subject.username).to eq(username) expect(subject.password).to eq(password) subject.clean(data: {username: "new-user", password: "new-pass"}, only: :password) expect(subject.username).to eq(username) expect(subject.password).to eq("new-pass") end it "should make clean values non-modifyable" do subject.clean(data: {username: "new-user", password: "new-pass"}, only: :password) subject.clean! 
expect { subject.username.replace("testing") }.to raise_error(FrozenError) end it "should error if data provided is not a hash" do expect { subject.clean(data: nil) }.to raise_error(TypeError) end end end end vagrant_cloud-3.1.3/spec/unit/vagrant_cloud/instrumentor/000077500000000000000000000000001477154370500236755ustar00rootroot00000000000000vagrant_cloud-3.1.3/spec/unit/vagrant_cloud/instrumentor/collection_spec.rb000066400000000000000000000152701477154370500273740ustar00rootroot00000000000000require "spec_helper" require "vagrant_cloud" describe VagrantCloud::Instrumentor::Collection do describe "#initialize" do it "should be a Core type" do expect(subject).to be_a(VagrantCloud::Instrumentor::Core) end it "should have a logger instrumentor by default" do expect(subject.instrumentors.first).to be_a(VagrantCloud::Instrumentor::Logger) end it "should accept additional instrumentors" do addition = VagrantCloud::Instrumentor::Core.new instance = described_class.new(instrumentors: [addition]) expect(instance.instrumentors).to include(addition) end it "should accept single additional instrumentor" do addition = VagrantCloud::Instrumentor::Core.new instance = described_class.new(instrumentors: addition) expect(instance.instrumentors).to include(addition) end end describe "#add" do it "should add a new instrumentor" do addition = VagrantCloud::Instrumentor::Core.new subject.add(addition) expect(subject.instrumentors).to include(addition) end it "should return self" do addition = VagrantCloud::Instrumentor::Core.new expect(subject.add(addition)).to eq(subject) end it "should only add an instrumentor instance once" do addition = VagrantCloud::Instrumentor::Core.new subject.add(addition).add(addition) expect(subject.instrumentors.count{|i| i == addition}).to eq(1) end it "should error if instance is not an instrumentor" do expect { subject.add("string") }.to raise_error(TypeError) end it "should freeze instrumentors after adding" do addition = 
VagrantCloud::Instrumentor::Core.new subject.add(addition).add(addition) expect(subject.instrumentors).to be_frozen end end describe "#remove" do it "should remove an instrumentor" do addition = VagrantCloud::Instrumentor::Core.new subject.add(addition).add(addition) expect(subject.instrumentors).to include(addition) subject.remove(addition) expect(subject.instrumentors).not_to include(addition) end it "should return self" do expect(subject.remove(nil)).to eq(subject) end it "should freeze instrumentors after removing" do addition = VagrantCloud::Instrumentor::Core.new subject.add(addition).add(addition) subject.remove(addition) expect(subject.instrumentors).to be_frozen end end describe "#subscribe" do it "should add a new subscription entry" do expect(subject.subscriptions.size).to eq(0) subject.subscribe("event", proc{}) expect(subject.subscriptions.size).to eq(1) end it "should freeze subscriptions after subscribing" do subject.subscribe("event", proc{}) expect(subject.subscriptions).to be_frozen end it "should return self" do expect(subject.subscribe("event", proc{})).to eq(subject) end it "should error if callable is not provided" do expect { subject.subscribe("event") }.to raise_error(TypeError) end it "should error if non-callable is provided" do expect { subject.subscribe("event", :thing) }.to raise_error(TypeError) end it "should error if callable and block are provided" do expect { subject.subscribe("event", proc{}){} }.to raise_error(ArgumentError) end it "should add with callable argument" do expect { subject.subscribe("event", proc{}) }.not_to raise_error end it "should add with block" do expect { subject.subscribe("event"){} }.not_to raise_error end end describe "#unsubscribe" do it "should remove entry using callable instance" do callable = proc{} subject.subscribe("event", callable) expect(subject.subscriptions.count).to eq(1) subject.unsubscribe(callable) expect(subject.subscriptions).to be_empty end it "should freeze subscriptions after remove" 
do callable = proc{} subject.subscribe("event", callable) expect(subject.subscriptions.count).to eq(1) subject.unsubscribe(callable) expect(subject.subscriptions).to be_frozen end it "should return self" do callable = proc{} subject.subscribe("event", callable) expect(subject.subscriptions.count).to eq(1) expect(subject.unsubscribe(callable)).to eq(subject) end end describe "#instrument" do let(:logger) { double("logger") } let(:event) { "event" } let(:params) { {} } before do allow(VagrantCloud::Instrumentor::Logger). to receive(:new).and_return(logger) allow(logger).to receive(:instrument) end it "should call the logger instrumentor" do expect(logger).to receive(:instrument).with(event, params) subject.instrument(event, params) end it "should yield when a block is provided" do run = false subject.instrument(event, params) do run = true end expect(run).to be_truthy end it "should return the result of the block when provided" do expect(subject.instrument(event, params){ :result }).to eq(:result) end it "should add timing information to params" do subject.instrument(event, params) expect(params).to have_key(:timing) end it "should provide duration timing" do expect(Time).to receive(:now).and_return(Time.now - 5) expect(Time).to receive(:now).and_call_original subject.instrument(event, params) expect(params.dig(:timing, :duration)).to be_within(0.01).of(5) end context "when a subscription is added with exact name match" do it "should call the subscription" do run = false callable = proc { run = true } subject.subscribe(event, callable) subject.instrument(event, params) expect(run).to be_truthy end end context "when a subscription is added with regex name match" do it "should call the subscription" do run = false callable = proc { run = true } subject.subscribe(/ev/, callable) subject.instrument(event, params) expect(run).to be_truthy end end context "when a subscription is added with exact name not matching" do it "should not call the subscription" do run = false 
callable = proc { run = true } subject.subscribe("other", callable) subject.instrument(event, params) expect(run).to be_falsey end end context "when a subscription is added with regex name not matching" do it "should not call the subscription" do run = false callable = proc { run = true } subject.subscribe(/ot/, callable) subject.instrument(event, params) expect(run).to be_falsey end end end end vagrant_cloud-3.1.3/spec/unit/vagrant_cloud/instrumentor/core_spec.rb000066400000000000000000000003761477154370500261720ustar00rootroot00000000000000require "spec_helper" require "vagrant_cloud" describe VagrantCloud::Instrumentor::Core do context "#instrument" do it "should raise NotImplementedError" do expect { subject.instrument }.to raise_error(NotImplementedError) end end end vagrant_cloud-3.1.3/spec/unit/vagrant_cloud/instrumentor/logger_spec.rb000066400000000000000000000126631477154370500265230ustar00rootroot00000000000000require "spec_helper" require "vagrant_cloud" describe VagrantCloud::Instrumentor::Logger do let(:logger) { double("logger") } before { allow(subject).to receive(:logger).and_return(logger) } it "should be a subclass of Core" do expect(described_class.ancestors).to include(VagrantCloud::Instrumentor::Core) end describe "#instrument" do before do allow(logger).to receive(:debug).and_yield allow(logger).to receive(:info).and_yield allow(logger).to receive(:error).and_yield end context "errors" do let(:name) { "test.error" } it "should output error message when event type is error" do expect(logger).to receive(:error) subject.instrument(name) end it "should include namespace and event type in output" do expect(logger).to receive(:error) do |&b| expect(b.call).to match(/test ERROR/) end subject.instrument(name) end it "should include optional error message if provided" do expect(logger).to receive(:error) do |&b| expect(b.call).to match(/test ERROR custom message/) end subject.instrument(name, error: "custom message") end it "should not proceed any 
farther" do expect(logger).not_to receive(:info) expect(logger).not_to receive(:debug) subject.instrument(name) end end context "non-excon namespaced events" do let(:name) { "test.action" } let(:params) { {} } after { subject.instrument(name, params) } it "should include namespace in info output" do expect(logger).to receive(:info) do |&b| expect(b.call).to include("test") end end it "should include namespace in debug output" do expect(logger).to receive(:debug) do |&b| expect(b.call).to include("test") end end it "should include the event name in the info output upcased" do expect(logger).to receive(:info) do |&b| expect(b.call).to include("ACTION") end end it "should include the event name in the debug output upcased" do expect(logger).to receive(:debug) do |&b| expect(b.call).to include("ACTION") end end it "should format output to the logger" do # debug format expect(subject).to receive(:format_output).with(params) # info format expect(subject).to receive(:format_output).with(anything) end context "when params include content" do let(:params) { {value: true, testing: "a-value"} } it "should include params in the info output" do expect(logger).to receive(:info) do |&b| result = b.call expect(result).to include("testing=\"a-value\"") expect(result).to include("value=true") end end it "should include params in the debug output" do expect(logger).to receive(:debug) do |&b| result = b.call expect(result).to include("testing=\"a-value\"") expect(result).to include("value=true") end end end end context "excon namespaced events" do let(:name) { "excon.action" } let(:params) { {data: nil} } after { subject.instrument(name, params) } it "should call #excon to filter parameters" do expect(subject).to receive(:excon).with(anything, params).and_return({}) end it "should send event type to #excon" do expect(subject).to receive(:excon).with("action", anything).and_return({}) end it "should output all parameters via debug" do allow(subject).to receive(:format_output) 
expect(subject).to receive(:format_output).with(params) end end end describe "excon" do let(:action) { double("action") } let(:params) { {} } let(:redacted) { described_class.const_get(:REDACTED) } it "should return hash with duration" do expect(subject.excon(action, params)).to have_key(:duration) end context "when parameters include password" do let(:params) { {password: "my-password"} } it "should redact the password value" do subject.excon(action, params) expect(params[:password]).to eq(redacted) end end context "when parameters include proxy password" do let(:params) { {proxy: {password: "my-password"}} } it "should redact the password value" do subject.excon(action, params) expect(params.dig(:proxy, :password)).to eq(redacted) end end context "when parameters include access token" do let(:params) { {access_token: "my-token"} } it "should redact the access token value" do subject.excon(action, params) expect(params[:access_token]).to eq(redacted) end end context "when parameters include authorization header" do let(:params) { {headers: {"Authorization" => "value"}} } it "should redact the authorization header value" do subject.excon(action, params) expect(params.dig(:headers, "Authorization")).to eq(redacted) end end context "when parameters include proxy authorization header" do let(:params) { {headers: {"Proxy-Authorization" => "value"}} } it "should redact the authorization header value" do subject.excon(action, params) expect(params.dig(:headers, "Proxy-Authorization")).to eq(redacted) end end end end vagrant_cloud-3.1.3/spec/unit/vagrant_cloud/organization_spec.rb000066400000000000000000000043651477154370500251770ustar00rootroot00000000000000require "spec_helper" require "vagrant_cloud" describe VagrantCloud::Organization do let(:account) { double("account") } let(:username) { "USERNAME" } let(:subject) { described_class.new(account: account, username: username) } describe "#initialize" do it "should error if account is not provided" do expect { 
described_class.new(username: username) }.to raise_error(ArgumentError) end it "should error if username is not provided" do expect { described_class.new(account: account) }.to raise_error(ArgumentError) end end describe "#add_box" do it "should create a new box" do expect(subject.add_box("test")).to be_a(VagrantCloud::Box) end it "should add box to the collection" do expect(subject.boxes).to be_empty subject.add_box("test") expect(subject.boxes).not_to be_empty end it "should error if box name already exists" do subject.add_box("test") expect { subject.add_box("test") }. to raise_error(VagrantCloud::Error::BoxError::BoxExistsError) end end describe "#dirty?" do it "should return false by default" do expect(subject.dirty?).to be_falsey end it "should check dirtiness based on attribute" do expect(subject.dirty?(:username)).to be_falsey end context "deep check" do it "should return false by default" do expect(subject.dirty?(deep: true)).to be_falsey end context "with box collection of one clean box" do before do b = subject.add_box("test") b.clean(data: {created_at: Time.now.to_s}) subject.clean! 
end it "should return false" do expect(subject.dirty?(deep: true)).to be_falsey end context "with a dirty box in collection" do before { subject.add_box("test2") } it "should return true" do expect(subject.dirty?(deep: true)).to be_truthy end end end end end describe "#save" do it "should return self" do expect(subject.save).to eq(subject) end context "with boxes" do it "should save boxes" do b = subject.add_box("test") expect(b).to receive(:save) subject.save end end end end vagrant_cloud-3.1.3/spec/unit/vagrant_cloud/response/000077500000000000000000000000001477154370500227625ustar00rootroot00000000000000vagrant_cloud-3.1.3/spec/unit/vagrant_cloud/response/create_token_spec.rb000066400000000000000000000035641477154370500267740ustar00rootroot00000000000000require 'spec_helper' require 'vagrant_cloud' describe VagrantCloud::Response::CreateToken do let(:token) { "token" } let(:token_hash) { "token_hash" } let(:created_at) { Time.now.to_s } let(:description) { "description" } let(:args) { {token: token, token_hash: token_hash, created_at: created_at, description: description } } let(:subject) { described_class.new(**args) } describe "#initialize" do it "should create a new instance" do expect { subject }.not_to raise_error end it "should require token" do args.delete(:token) expect { subject }.to raise_error(ArgumentError) end it "should require token_hash" do args.delete(:token_hash) expect { subject }.to raise_error(ArgumentError) end it "should require created_at" do args.delete(:created_at) expect { subject }.to raise_error(ArgumentError) end it "should require description" do args.delete(:description) expect { subject }.to raise_error(ArgumentError) end end describe "#token" do it "should return a value" do expect(subject.token).to eq(token) end it "should freeze the value" do expect(subject.token).to be_frozen end end describe "#token_hash" do it "should return a value" do expect(subject.token_hash).to eq(token_hash) end it "should freeze the value" do 
expect(subject.token_hash).to be_frozen end end describe "#created_at" do it "should return a value" do expect(subject.created_at).to eq(created_at) end it "should freeze the value" do expect(subject.created_at).to be_frozen end end describe "#description" do it "should return a value" do expect(subject.description).to eq(description) end it "should freeze the value" do expect(subject.description).to be_frozen end end end vagrant_cloud-3.1.3/spec/unit/vagrant_cloud/response/request_2fa_spec.rb000066400000000000000000000012471477154370500265450ustar00rootroot00000000000000require 'spec_helper' require 'vagrant_cloud' describe VagrantCloud::Response::Request2FA do let(:subject) { described_class.new(destination: destination) } let(:destination) { "value" } describe "#initialize" do it "should create a new instance" do expect { subject }.not_to raise_error end it "should error if destination is not provided" do expect { described_class.new }.to raise_error(ArgumentError) end end describe "#destination" do it "should return a value" do expect(subject.destination).to eq(destination) end it "should freeze the value" do expect(subject.destination).to be_frozen end end end vagrant_cloud-3.1.3/spec/unit/vagrant_cloud/response/search_spec.rb000066400000000000000000000057211477154370500255730ustar00rootroot00000000000000require 'spec_helper' require 'vagrant_cloud' describe VagrantCloud::Response::Search do let(:client) { double("client", access_token: nil) } let(:account) { VagrantCloud::Account.new(client: client) } let(:params) { {} } let(:result) { {boxes: boxes} } let(:boxes) { [] } let(:searcher) { VagrantCloud::Search.new(account: account) } let(:subject) { described_class.new(account: account, params: params, **result) } before do allow(client).to receive(:is_a?).with(VagrantCloud::Client).and_return(true) allow(VagrantCloud::Search).to receive(:new).and_return(searcher) end describe "#initialize" do it "should error when account is not provided" do expect { 
described_class.new(params: params, **result) }. to raise_error(ArgumentError) end it "should error when params are not provided" do expect { described_class.new(account: account, **result) }. to raise_error(ArgumentError) end it "should error when account is not the right type" do expect { described_class.new(account: "value", params: params, **result) }. to raise_error(TypeError) end it "should load boxes" do expect_any_instance_of(described_class).to receive(:reload_boxes) subject end end describe "#page" do it "defaults to page 1" do expect(subject.page).to eq(1) end context "when page is set in params" do let(:params) { {page: 5} } it "should return the page value" do expect(subject.page).to eq(5) end end end describe "#previous" do it "should raise error when no previous page is available" do expect { subject.previous }.to raise_error(ArgumentError) end context "with previous pages available" do let(:params) { {page: 5} } let(:response) { double("response") } before { allow(searcher).to receive(:execute).and_return(response) } it "should request previous page through a searcher" do expect(searcher).to receive(:prev_page) subject.previous end end end describe "#next" do let(:response) { double("response") } before { allow(searcher).to receive(:execute).and_return(response) } it "should request next page through a searcher" do expect(searcher).to receive(:next_page) subject.next end end describe "#boxes" do let(:boxes) { [{tag: "org/box", name: "box", username: "org"}] } let(:organization) { VagrantCloud::Organization. new(account: account, username: "org") } before { allow(account).to receive(:organization). 
with(name: "org").and_return(organization) } it "should have a boxes count of 1" do expect(subject.boxes.count).to eq(1) end it "should contain a Box instance" do expect(subject.boxes.first).to be_a(VagrantCloud::Box) end it "should population the organization" do expect(subject.boxes.first.organization.boxes.first).to eq(subject.boxes.first) end end end vagrant_cloud-3.1.3/spec/unit/vagrant_cloud/search_spec.rb000066400000000000000000000144471477154370500237420ustar00rootroot00000000000000require 'spec_helper' require 'vagrant_cloud' describe VagrantCloud::Search do let(:account) { double("account", client: client_account) } let(:client) { double("client", access_token: nil) } let(:client_account) { double("client_account") } let(:client_with_token) { double("client_with_token", access_token: access_token) } let(:access_token) { double("access_token") } let(:response) { {boxes: boxes} } let(:boxes) { [] } before do allow(VagrantCloud::Client).to receive(:new). with(hash_including(access_token: nil)).and_return(client) allow(VagrantCloud::Client).to receive(:new). 
with(hash_including(access_token: access_token)).and_return(client_with_token) allow(account).to receive(:is_a?).with(VagrantCloud::Account).and_return(true) allow(client).to receive(:is_a?).with(VagrantCloud::Client).and_return(true) allow(client_with_token).to receive(:request).and_return({}) allow(client_with_token).to receive(:authentication_token_validate).and_return({}) allow(client).to receive(:search).and_return(response) end describe "#initialize" do it "should create a new instance without an access token" do expect(subject.account.client).to eq(client) end it "should create a new instance with a custom access token" do instance = described_class.new(access_token: access_token) expect(instance.account.client).to eq(client_with_token) end it "should create a new instance with an account" do instance = described_class.new(account: account) expect(instance.account.client).to eq(client_account) end it "should create a new instance with a client" do instance = described_class.new(client: client) expect(instance.account.client).to eq(client) end it "should error when more than one argument is provided" do expect { described_class.new(client: client, access_token: access_token) }. to raise_error(ArgumentError) end it "should error when client is not a client instance" do expect { described_class.new(client: "value") }. to raise_error(TypeError) end it "should error when account is not an account instance" do expect { described_class.new(account: "value") }. 
to raise_error(TypeError) end end describe "#search" do it "should execute the search request" do expect(subject).to receive(:execute) subject.search end it "should set instance to active after a search" do expect(subject.active?).to be_falsey subject.search expect(subject.active?).to be_truthy end it "should return a search response" do expect(subject.search).to be_a(VagrantCloud::Response::Search) end end describe "#next_page" do it "should error without active search" do expect { subject.next_page }.to raise_error(ArgumentError) end context "with active search" do before { subject.search } it "should not produce an error" do expect { subject.next_page }.not_to raise_error end it "should increment the page requested" do expect(subject).to receive(:execute).and_call_original expect(client).to receive(:search).with(hash_including(page: 2)). and_return(response) subject.next_page end it "should return a search response" do expect(subject.next_page).to be_a(VagrantCloud::Response::Search) end it "should persist the page number" do subject.next_page expect(client).to receive(:search).with(hash_including(page: 3)). and_return(response) subject.next_page end end end describe "#prev_page" do it "should error without active search" do expect { subject.prev_page }.to raise_error(ArgumentError) end context "with active search" do before { subject.search } it "should not produce an error" do expect { subject.prev_page }.not_to raise_error end it "should maintain page 1 when decrementing page is less than 1" do subject.prev_page expect(client).to receive(:search).with(hash_including(page: 1)). and_return(response) subject.prev_page end it "should return a search response" do expect(subject.prev_page).to be_a(VagrantCloud::Response::Search) end context "with active search on page 10" do before { subject.search(page: 10) } it "should request results from page 9" do expect(client).to receive(:search).with(hash_including(page: 9)). 
and_return(response) subject.prev_page end it "should persist page value and request page 8" do subject.prev_page expect(client).to receive(:search).with(hash_including(page: 8)). and_return(response) subject.prev_page end end end end describe "#active?" do context "without active search" do it "should be false" do expect(subject.active?).to be_falsey end end context "with active search" do before { subject.search } it "should be true" do expect(subject.active?).to be_truthy end end end describe "#clear!" do it "should return self" do expect(subject.clear!).to eq(subject) end it "should not be active after clearing" do subject.clear! expect(subject.active?).to be_falsey end context "with active search" do before { subject.search } it "should not be active after clearing" do expect(subject.active?).to be_truthy subject.clear! expect(subject.active?).to be_falsey end end end describe "#seed" do it "should return self" do expect(subject.seed(query: "test")).to eq(subject) end it "should make search instance active" do expect(subject.active?).to be_falsey subject.seed(query: "test") expect(subject.active?).to be_truthy end it "should not execute a request" do expect(subject).not_to receive(:execute) subject.seed(query: "test") end end describe "#from_response" do it "should yield a new search instance" do subject.from_response(subject.search) do |s| expect(s).not_to eq(subject) expect(s).to be_a(described_class) end end end end vagrant_cloud-3.1.3/tasks/000077500000000000000000000000001477154370500155105ustar00rootroot00000000000000vagrant_cloud-3.1.3/tasks/rspec.rake000066400000000000000000000002031477154370500174630ustar00rootroot00000000000000require 'rspec/core/rake_task' desc 'Run all specs' RSpec::Core::RakeTask.new(:spec) do |t| t.pattern = 'spec/**/*_spec.rb' end vagrant_cloud-3.1.3/vagrant_cloud.gemspec000066400000000000000000000015571477154370500205700ustar00rootroot00000000000000require_relative "lib/vagrant_cloud/version" Gem::Specification.new do |s| 
s.name = 'vagrant_cloud' s.version = VagrantCloud::VERSION.to_s s.summary = 'Vagrant Cloud API Library' s.description = 'Ruby library for the HashiCorp Vagrant Cloud API' s.authors = ['HashiCorp', 'Cargo Media'] s.email = 'vagrant@hashicorp.com' s.files = Dir['LICENSE*', 'README*', '{lib}/**/*'].reject { |f| f.end_with?('~') } s.homepage = 'https://github.com/hashicorp/vagrant_cloud' s.license = 'MIT' s.add_runtime_dependency 'excon', '~> 1.0' s.add_runtime_dependency 'log4r', '~> 1.1' s.add_runtime_dependency 'rexml', '~> 3.3' s.add_runtime_dependency 'oauth2', '~> 2.0' s.add_development_dependency 'rake', '~> 12.3' s.add_development_dependency 'rspec', '~> 3.0' s.add_development_dependency 'webmock', '~> 3.0' end vagrant_cloud-3.1.3/version.txt000066400000000000000000000000061477154370500166050ustar00rootroot000000000000003.1.3